Notes on the methods used in the data-preprocessing stage of hands-on projects.
import random
# Randomly sample 5*trainSize indices from the rows after the first trainSize rows
randIndex = random.sample(range(trainSize, len(trainData_copy)), 5 * trainSize)
# Naive version: count the occurrences of each unique title one by one (slow)
titleSet = set(titleData)
for i in titleSet:
    count = titleData.count(i)
Replace each non-null text value with its occurrence count (a bag-of-words / word-count encoding):
titleData = allData['title']
titleSet = set(list(titleData))
title_counts = titleData.value_counts()
for i in titleSet:
    if isNaN(i):
        continue
    count = title_counts[i]
    titleData.replace(i, count, inplace=True)  # Series.replace takes no axis argument
allData['title'] = titleData
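An equivalent vectorized version, a minimal sketch assuming titleData is a pandas Series: map each title to its count via value_counts(), which leaves NaN values untouched.
# Map every title to its occurrence count in one pass; NaN maps to NaN
allData['title'] = titleData.map(titleData.value_counts())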
# NaN is the only value not equal to itself, so num != num tests for NaN
def isNaN(num):
    return num != num
%matplotlib inline
birth = trainData['birth_date']
birthDate = pd.to_datetime(birth)
end = pd.Timestamp(2018, 1, 1)  # pd.datetime is deprecated; use pd.Timestamp
# Compute the number of days
birthDay = end - birthDate
# Convert timedelta64 to int64 (number of days)
trainData['birth_date'] = birthDay.dt.days
# Average blocks of related columns into single mean features (column ranges are dataset-specific)
trainData['operate_able'] = trainData.iloc[:, 20:53].mean(axis=1)
trainData['local_able'] = trainData.iloc[:, 53:64].mean(axis=1)
train_test = pd.get_dummies(train_test,columns=["Embarked"])
train_test = pd.get_dummies(train_test,columns = ['SibSp','Parch','SibSp_Parch'])
df['Name'].str.extract() extracts substrings and is used together with a regular expression:
train_test['Name1'] = train_test['Name'].str.extract(r'.+,(.+)', expand=False).str.extract(r'^(.+?)\.', expand=False).str.strip()
train_test.loc[train_test["Age"].isnull() ,"age_nan"] = 1
train_test.loc[train_test["Age"].notnull() ,"age_nan"] = 0
pd.cut assigns each value the label of the half-open interval it falls into:
# Bin ages into five groups: under 10, 10-18, 18-30, 30-50, over 50
train_test['Age'] = pd.cut(train_test['Age'], bins=[0, 10, 18, 30, 50, 100], labels=[1, 2, 3, 4, 5])
# Indices of the rows where acc_now_delinq equals 1
delLocal = np.where(trainData['acc_now_delinq'].values == 1)[0]
If x is a multi-dimensional array, np.random.permutation only shuffles along its first axis.
import numpy as np
shuffle_index = np.random.permutation(60000)
X_train, y_train = X_train[shuffle_index], y_train[shuffle_index]
np.argmax returns the indices of the maximum values along an axis.
np.argmax(some_digit_scores)
- a : array_like; the input array.
- axis : int, optional; by default the index is into the flattened array, otherwise along the specified axis.
- out : array, optional; if provided, the result is inserted into this array. It should be of the appropriate shape and dtype.
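A quick illustration of the axis parameter on a toy array:
>>> a = np.array([[1, 5, 2],
...               [4, 3, 6]])
>>> np.argmax(a)  # index into the flattened array
5
>>> np.argmax(a, axis=0)  # row index of the max in each column
array([1, 0, 1])
>>> np.argmax(a, axis=1)  # column index of the max in each row
array([1, 2])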
>>> np.dot(3, 4)
12
>>> np.random.seed(42)  # set the random seed for reproducibility
>>> theta = np.random.randn(2, 1)
>>> theta
array([[ 0.49671415],
       [-0.1382643 ]])
Parameters
- d0, d1, ..., dn : int, optional; the dimensions of the returned array, must be non-negative. If no argument is given, a single Python float is returned.
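For example (the scalar output below is random):
>>> np.random.randn()  # no dimensions given: a single Python float
-0.23413695694918055  # random
>>> np.random.randn(2, 3).shape  # dimensions given: an ndarray
(2, 3)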
X_new = np.linspace(-3, 3, 100).reshape(100, 1)
X_new_poly = poly_features.transform(X_new)
y_new = lin_reg.predict(X_new_poly)
plt.plot(X, y, "b.")
plt.plot(X_new, y_new, "r-", linewidth=2, label="Predictions")
plt.xlabel("$x_1$", fontsize=18)
plt.ylabel("$y$", rotation=0, fontsize=18)
plt.legend(loc="upper left", fontsize=14)
plt.axis([-3, 3, 0, 10])
save_fig("quadratic_predictions_plot")  # save_fig is a notebook helper defined elsewhere, not a matplotlib function
plt.show()
- start : scalar; the starting value of the sequence.
- stop : scalar; the end value of the sequence.
- num : int, optional; the number of samples to generate. Defaults to 50.
- endpoint : bool, optional; if True the stop value is included, otherwise it is excluded, i.e. the interval is [start, stop). Defaults to True.
- dtype : dtype, optional; the type of the output array. If not given, it is inferred from the input.
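For example:
>>> np.linspace(0, 1, 5)
array([0.  , 0.25, 0.5 , 0.75, 1.  ])
>>> np.linspace(0, 1, 5, endpoint=False)  # [start, stop): 1 is excluded
array([0. , 0.2, 0.4, 0.6, 0.8])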
import os
import pandas as pd
# Show up to 64 columns and plenty of rows when printing DataFrames
pd.set_option('display.max_columns', 64)
pd.set_option('display.max_rows', 1000000)
homePath = 'game'
trainPath = os.path.join(homePath, 'train.csv')
testPath = os.path.join(homePath, 'test.csv')
trainData = pd.read_csv(trainPath)
testData = pd.read_csv(testPath)
- .head() shows the first five rows for a quick look.
- .info() shows the total row count, each column's dtype, and the number of non-null values.
- .value_counts() counts how many times each value occurs.
- .hist() displays numeric columns as histograms.
- .describe() summarizes the numeric columns: count, mean, standard deviation, min/max, and the 25%/50%/75% quantiles.
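Typical usage on the data loaded above ('grade' is only an example column name):
trainData.head()                   # first five rows
trainData.info()                   # dtypes and non-null counts
trainData['grade'].value_counts()  # frequency of each value in one column
trainData.hist(bins=50, figsize=(20, 15))
trainData.describe()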
mthsMajorTest = fullData.copy()
- Compute the correlation matrix
corrMatrix = trainData.corr()
corrMatrix['acc_now_delinq'].sort_values(ascending=False)  # sort in descending order
- Plot the correlation matrix
import numpy as np
correlations = data.corr()  # correlation matrix between the variables
# plot correlation matrix
fig = plt.figure()  # create a figure object
ax = fig.add_subplot(111)
cax = ax.matshow(correlations, vmin=-1, vmax=1)  # draw the heat map, scaled from -1 to 1
fig.colorbar(cax)  # add a gradient color bar for the heat map
ticks = np.arange(0, 9, 1)  # 0 to 8 with step 1
ax.set_xticks(ticks)  # set the tick positions
ax.set_yticks(ticks)
ax.set_xticklabels(names)  # set the x-axis labels
ax.set_yticklabels(names)
plt.show()
Darker colors indicate stronger correlation between the two variables.
trainData.drop('acc_now_delinq', axis=1, inplace=True)
# Note: deleting a column does not necessarily release the memory right away
del fullData['member_id']
termData = list(map(int, termData))
# Ordinal-encode the grades
gradeData.replace(['A', 'B', 'C', 'D', 'E', 'F', 'G'], [7, 6, 5, 4, 3, 2, 1], inplace=True)
allData = trainData.append(testData)  # DataFrame.append is deprecated in newer pandas
# Equivalent, and preferred:
allData = pd.concat([trainData, testData], axis=0, ignore_index=True)
# Keep the second field after splitting on spaces (e.g. '36' from ' 36 months')
termData = termData.str.split(' ', n=2, expand=True)[1]
where() modifies values based on a condition on the values themselves, much like the ternary operator ( ? : ):
housing["income_cat"].where(housing["income_cat"] < 5, 5.0, inplace=True)
- cond: where True, keep the original value; where False, replace it with the second argument, other.
- other: the replacement value.
- inplace: whether to perform the operation on the data in place.
Parameters of np.ceil, used below:
- x: the input data.
- y: float; the ceiling of each element (the return value).
housing["income_cat"] = np.ceil(housing["median_income"] / 1.5) # 每个元素都除1.5
strat_train_set = housing.loc[train_index]
strat_test_set = housing.loc[test_index]
Return object with labels on given axis omitted where alternately any or all of the data are missing
sample_incomplete_rows.dropna(subset=["total_bedrooms"])
# Fill with the median
median = housing["total_bedrooms"].median()
sample_incomplete_rows["total_bedrooms"].fillna(median, inplace=True)
allData = subTrain.reset_index()
from sklearn.preprocessing import StandardScaler
ss = StandardScaler()
ss.fit(mthsMajorTrain)
mthsMajorTrain_d = ss.transform(mthsMajorTrain)
mthsMajorTest_d = ss.transform(mthsMajorTest)
from sklearn import linear_model
# Fit a Bayesian ridge regression on the rows where the target column is present...
lin = linear_model.BayesianRidge()
lin.fit(mthsMajorTrain_d, mthsMajorTrainLabel)
# ...then fill the missing entries with its predictions
trainData.loc[trainData['mths_since_last_major_derog'].isnull(), 'mths_since_last_major_derog'] = lin.predict(mthsMajorTest_d)
import lightgbm as lgb
params = {
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'regression',
'metric': {'l2', 'auc'},
'num_leaves': 31,
'learning_rate': 0.05,
'feature_fraction': 0.9,
'bagging_fraction': 0.8,
'bagging_freq': 5,
'verbose': 0
}
# First 400,000 rows for training, the rest for validation
lgb_train = lgb.Dataset(totTrain[:400000], totLabel[:400000])
lgb_eval = lgb.Dataset(totTrain[400000:], totLabel[400000:])
gbm = lgb.train(params,
                lgb_train,
                num_boost_round=20,
                valid_sets=lgb_eval,
                early_stopping_rounds=5)  # newer LightGBM versions use callbacks=[lgb.early_stopping(5)] instead
lgb.plot_importance(gbm, figsize=(10,10))
For missing values, a common approach is to hand-pick a few important features and use them to predict the missing column.
upFeatures = ['revol_util', 'revol_bal', 'annual_inc']  # features selected in the previous step
totTrain = totTrain[upFeatures]
totTest = trainData.loc[(trainData['total_rev_hi_lim'].isnull())][upFeatures]
totTest['annual_inc'].fillna(-9999, inplace=True)  # fill NaN in the predictor itself with a sentinel value
from sklearn.preprocessing import StandardScaler
ss = StandardScaler()
ss.fit(totTrain)
train_d = ss.transform(totTrain)
test_d = ss.transform(totTest)
from sklearn import linear_model
lin = linear_model.BayesianRidge()
lin.fit(train_d, totLabel)
trainData.loc[(trainData['total_rev_hi_lim'].isnull()), 'total_rev_hi_lim'] = lin.predict(test_d)
# Fill with the median (or, alternatively, the mean):
trainData['total_acc'].fillna(trainData['total_acc'].median(), inplace=True)
# trainData['total_acc'].fillna(trainData['total_acc'].mean(), inplace=True)
All attributes must be numeric.
from sklearn.preprocessing import Imputer  # replaced by sklearn.impute.SimpleImputer in newer scikit-learn
# Specify the value used to replace missing entries; here, the median
imputer = Imputer(strategy="median")
# Fit the instance to the data
imputer.fit(housing_num)
# The computed statistics are stored in the statistics_ attribute
imputer.statistics_
# Replace the missing values
X = imputer.transform(housing_num)
housing_tr = pd.DataFrame(X, columns=housing_num.columns,
                          index=list(housing.index.values))
# Preview
housing_tr.loc[sample_incomplete_rows.index.values]
housing_cat = housing['ocean_proximity']
housing_cat.head(10)
# Output:
# 17606 <1H OCEAN
# 18632 <1H OCEAN
# 14650 NEAR OCEAN
# 3230 INLAND
# 3555 <1H OCEAN
# 19480 INLAND
# 8879 <1H OCEAN
# 13685 INLAND
# 4937 <1H OCEAN
# 4861 <1H OCEAN
# Name: ocean_proximity, dtype: object
housing_cat_encoded, housing_categories = housing_cat.factorize()
housing_cat_encoded[:10]
# Output:
# array([0, 0, 1, 2, 0, 2, 0, 2, 0, 0], dtype=int64)
- values : ndarray (1-d); the sequence to encode.
- sort : boolean, default False; sort the uniques by value.
- na_sentinel : int, default -1; the value assigned to entries that are missing.
- size_hint : hint to the hashtable sizer.
Returns:
- labels : the indexer into the original array.
- uniques : ndarray (1-d) or Index; when the values passed are an Index or Series, the unique values are returned as an Index.
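A toy example showing sort and the NA sentinel:
>>> codes, uniques = pd.factorize(['b', 'a', None, 'b'], sort=True)
>>> codes  # None gets na_sentinel (-1)
array([ 1,  0, -1,  1])
>>> uniques
array(['a', 'b'], dtype=object)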
The return value is a sparse matrix:
from sklearn.preprocessing import OneHotEncoder
encoder = OneHotEncoder()
housing_cat_1hot = encoder.fit_transform(housing_cat_encoded.reshape(-1,1))
housing_cat_1hot
Note that fit_transform() expects a 2-D array, which is why the data is reshaped here.
housing_categories
# Index(['<1H OCEAN', 'NEAR OCEAN', 'INLAND', 'NEAR BAY', 'ISLAND'], dtype='object')
print(housing_cat_encoded.reshape(-1, 1))
# [[0]
#  [0]
#  [1]
#  ...,
#  [2]
#  [0]
#  [3]]
housing_cat_1hot
# <16512x5 sparse matrix of type '<class 'numpy.float64'>'
#  with 16512 stored elements in Compressed Sparse Row format>
LabelEncoder is a utility class for normalizing labels: it encodes labels to integers in the range [0, n_classes-1]. In short, it assigns consecutive numeric codes to non-consecutive numbers or text.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6])
array([0, 0, 1, 2])
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also encode non-numeric labels as numeric ones (as long as they are hashable and comparable):
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"])
array([2, 2, 1])
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
LabelBinarizer is a utility class for creating a label indicator matrix from a list of multi-class labels:
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
For instances with multiple labels each, use MultiLabelBinarizer:
>>> lb = preprocessing.MultiLabelBinarizer()
>>> lb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> lb.classes_
array([1, 2, 3])