1. Importing the data (omitted)
2. Inspecting the data (omitted)
3. Feature Engineering
import datetime

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_selection import SelectKBest
# chi-squared test
from sklearn.feature_selection import chi2
from sklearn.preprocessing import MinMaxScaler
import xgboost as xgb
import lightgbm as lgb
from catboost import CatBoostRegressor
from sklearn.model_selection import StratifiedKFold, KFold
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score, log_loss
Supplementary notes
I had previously used StandardScaler for feature scaling; after looking up the MinMaxScaler used here, I found there are four commonly used scalers, summarized below (the before/after distribution plots are omitted):
① z-score scaling: centers each feature to mean 0 and unit variance (StandardScaler()), usually called standardization.
② min-max scaling: rescales each feature into [0, 1] (MinMaxScaler()), usually called normalization.
③ max-abs scaling: rescales each feature into [-1, 1] by its maximum absolute value (MaxAbsScaler()).
④ Normalizer(): rescales each sample (row) to unit norm, rather than scaling columns.
The results of the different scaling methods can be compared later during modeling; a minimal sketch follows.
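A minimal sketch comparing the four scalers on a toy array (the values here are made up purely for illustration):

from sklearn.preprocessing import StandardScaler, MinMaxScaler, MaxAbsScaler, Normalizer

X = np.array([[1.0], [5.0], [10.0], [100.0]])        # toy single-feature data
print(StandardScaler().fit_transform(X).ravel())     # mean 0, unit variance
print(MinMaxScaler().fit_transform(X).ravel())       # rescaled into [0, 1]
print(MaxAbsScaler().fit_transform(X).ravel())       # rescaled into [-1, 1] by max |x|
# Normalizer works per row, so it needs at least two columns to be meaningful
X2 = np.array([[3.0, 4.0], [1.0, 1.0]])
print(Normalizer().fit_transform(X2))                # each row rescaled to unit L2 norm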
3.1 Feature Preprocessing
# split columns into numerical and categorical features
numerical_fea = list(train_data.select_dtypes(exclude=['object']).columns)
category_fea = list(filter(lambda x: x not in numerical_fea, list(train_data.columns)))
label = 'isDefault'
numerical_fea.remove(label)
numerical_fea.remove('id')
Filling missing values
# numerical features: fill with the median to limit the influence of extreme values
train_data[numerical_fea] = train_data[numerical_fea].fillna(train_data[numerical_fea].median())
test_data[numerical_fea] = test_data[numerical_fea].fillna(test_data[numerical_fea].median())
# categorical features: fill with the mode (mode() returns a DataFrame, so take its first row)
train_data[category_fea] = train_data[category_fea].fillna(train_data[category_fea].mode().iloc[0])
test_data[category_fea] = test_data[category_fea].fillna(test_data[category_fea].mode().iloc[0])
# missing_data() is the helper defined in the omitted section 2
missing_data(train_data).head()
Date handling
for data in [train_data, test_data]:
    data['issueDate'] = pd.to_datetime(data['issueDate'], format="%Y-%m-%d")
    startdate = datetime.datetime.strptime('2007-06-01', '%Y-%m-%d')
    # construct a new time feature: days elapsed since 2007-06-01
    data['issueDateDT'] = (data['issueDate'] - startdate).dt.days
Converting object-type features to numeric
# done this way to preserve the ordinal nature of this field
def employmentLength_to_int(s):
    if pd.isnull(s):
        return s
    else:
        return np.int8(s.split()[0])

for data in [train_data, test_data]:
    data['employmentLength'].replace(to_replace='10+ years', value='10 years', inplace=True)
    data['employmentLength'].replace('< 1 year', '0 year', inplace=True)
    data['employmentLength'] = data['employmentLength'].apply(employmentLength_to_int)
train_data['earliesCreditLine'].sample(5)
for data in [train_data, test_data]:
    # keep only the year (the last four characters)
    data['earliesCreditLine'] = data['earliesCreditLine'].apply(lambda s: int(s[-4:]))
Categorical feature handling
cate_features = ['grade', 'subGrade', 'employmentTitle', 'homeOwnership', 'verificationStatus', 'purpose', 'postCode', 'regionCode', \
                 'applicationType', 'initialListStatus', 'title', 'policyCode']
for f in cate_features:
    print(f, 'unique categories:', train_data[f].nunique())
# grade unique categories: 7
# subGrade unique categories: 35
# employmentTitle unique categories: 79282
# homeOwnership unique categories: 6
# verificationStatus unique categories: 3
# purpose unique categories: 14
# postCode unique categories: 889
# regionCode unique categories: 51
# applicationType unique categories: 2
# initialListStatus unique categories: 2
# title unique categories: 12058
# policyCode unique categories: 1
# ordinal categorical features: use a label encoder or an explicit mapping
for data in [train_data, test_data]:
    data['grade'] = data['grade'].map({'A':1,'B':2,'C':3,'D':4,'E':5,'F':6,'G':7})
# purely nominal categorical features: one-hot encode with get_dummies.
# Note: pd.get_dummies returns a new DataFrame, so the result must be assigned
# back; rebinding a loop variable would leave train_data/test_data unchanged.
# 'subGrade' is kept out of this list because it is target-encoded in 3.3
# and label-encoded in 3.4.
train_data = pd.get_dummies(train_data, columns=['homeOwnership', 'verificationStatus', 'purpose', 'regionCode'], drop_first=True)
test_data = pd.get_dummies(test_data, columns=['homeOwnership', 'verificationStatus', 'purpose', 'regionCode'], drop_first=True)
# refresh numerical_fea so later steps only reference columns that still exist
numerical_fea = [f for f in numerical_fea if f in train_data.columns]
def find_outliers_by_3sigma(data, fea):
    # flag values more than 3 standard deviations away from the mean
    data_std = np.std(data[fea])
    data_mean = np.mean(data[fea])
    outliers_cut_off = data_std * 3
    lower_rule = data_mean - outliers_cut_off
    upper_rule = data_mean + outliers_cut_off
    data[fea + '_outliers'] = data[fea].apply(lambda x: 'outlier' if x > upper_rule or x < lower_rule else 'normal')
    return data

train_data = train_data.copy()
for fea in numerical_fea:
    train_data = find_outliers_by_3sigma(train_data, fea)
    print(train_data[fea + '_outliers'].value_counts())
    print(train_data.groupby(fea + '_outliers')['isDefault'].sum())
    print('*' * 10)
Partial output omitted.
# drop the rows flagged as outliers
for fea in numerical_fea:
    train_data = train_data[train_data[fea + '_outliers'] == 'normal']
    train_data = train_data.reset_index(drop=True)
3.2 Binning
- Fixed-width binning: ① division mapping, ② logarithm mapping
- Quantile binning
for data in [train_data, test_data]:
    data['loanAmnt_bin1'] = np.floor_divide(data['loanAmnt'], 1000)      # fixed width via division
    data['loanAmnt_bin2'] = np.floor(np.log10(data['loanAmnt']))         # fixed width via log10
    data['loanAmnt_bin3'] = pd.qcut(data['loanAmnt'], 10, labels=False)  # deciles
3.3 Feature Interaction
# target mean encoding of grade and subGrade (computed on the full training set)
for col in ['grade', 'subGrade']:
    temp_dict = train_data.groupby(col)['isDefault'].mean().to_dict()
    train_data[col + '_target_mean'] = train_data[col].map(temp_dict)
    test_data[col + '_target_mean'] = test_data[col].map(temp_dict)
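The mapping above is computed on the full training set, so each row's encoding has seen its own label. A common leakage-reducing variant (not in the original notebook, shown here only as a sketch) is out-of-fold target encoding, where each fold is encoded with statistics from the other folds:

def oof_target_encode(train_df, test_df, col, target, n_splits=5, seed=2020):
    # hypothetical helper: encode each training fold using only the other folds
    oof = pd.Series(np.nan, index=train_df.index)
    kf = KFold(n_splits=n_splits, shuffle=True, random_state=seed)
    for fit_idx, enc_idx in kf.split(train_df):
        fold_means = train_df.iloc[fit_idx].groupby(col)[target].mean()
        oof.iloc[enc_idx] = train_df[col].iloc[enc_idx].map(fold_means).values
    # the test set is encoded with statistics from the full training set
    test_enc = test_df[col].map(train_df.groupby(col)[target].mean())
    return oof.fillna(train_df[target].mean()), test_enc

# usage sketch:
# train_data['grade_te'], test_data['grade_te'] = oof_target_encode(train_data, test_data, 'grade', 'isDefault')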
# other derived features: grade relative to the group mean and std
for df in [train_data, test_data]:
    for item in ['n0','n1','n2','n2.1','n4','n5','n6','n7','n8','n9','n10','n11','n12','n13','n14']:
        df['grade_to_mean_' + item] = df['grade'] / df.groupby([item])['grade'].transform('mean')
        df['grade_to_std_' + item] = df['grade'] / df.groupby([item])['grade'].transform('std')
3.4 Feature Encoding
# label-encode: employmentTitle, postCode, title, subGrade
# high-cardinality categorical features need to be converted to integer codes
for col in tqdm(['employmentTitle', 'postCode', 'title', 'subGrade']):
    le = LabelEncoder()
    # fit on train and test together so both share one consistent mapping
    le.fit(list(train_data[col].astype(str).values) + list(test_data[col].astype(str).values))
    train_data[col] = le.transform(list(train_data[col].astype(str).values))
    test_data[col] = le.transform(list(test_data[col].astype(str).values))
print('Label Encoding done')
3.5 Other Operations
Logistic regression needs feature scaling and removal of correlated features:
- Scaling helps training converge better and faster, and prevents large-scale features from dominating small-scale ones (see the sketch after this list).
- Removing correlated features improves the model's interpretability and speeds up prediction.
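As a sketch of what that looks like in practice (a plain logistic-regression baseline, not part of the original pipeline), the scaler can live inside a Pipeline so it is refit on training data only:

from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline

# MinMaxScaler is already imported above; the pipeline refits it per training set
lr_pipe = make_pipeline(MinMaxScaler(), LogisticRegression(max_iter=1000))
# lr_pipe.fit(x_train, y_train)   # x_train / y_train are defined in section 4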
# drop columns that are no longer needed
for data in [train_data, test_data]:
    data.drop(['issueDate', 'id'], axis=1, inplace=True)
Inspecting correlations between features
data_numeric = train_data[numerical_fea].drop('policyCode', axis=1)  # policyCode is constant
correlation = data_numeric.corr()
f, ax = plt.subplots(figsize=(7, 7))
plt.title('Correlation of Numeric Features', y=1, size=16)
sns.heatmap(correlation, square=True, vmax=0.8)
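To act on the heatmap, one common recipe (an assumption on my part, not in the original) is to drop one feature from every pair whose absolute correlation exceeds a threshold:

# keep only the upper triangle so each pair is inspected once
corr_abs = correlation.abs()
upper = corr_abs.where(np.triu(np.ones(corr_abs.shape), k=1).astype(bool))
to_drop = [c for c in upper.columns if (upper[c] > 0.95).any()]  # 0.95 is illustrative
print('highly correlated features:', to_drop)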
4. Modeling Example
Define the X and y variables
features = [f for f in train_data.columns if f not in ['id','issueDate','isDefault'] and '_outliers' not in f]
x_train = train_data[features]
x_test = test_data[features]
y_train = train_data['isDefault']
def cv_model(clf, train_x, train_y, test_x, clf_name):
    folds = 5
    seed = 2020
    kf = KFold(n_splits=folds, shuffle=True, random_state=seed)

    train = np.zeros(train_x.shape[0])  # out-of-fold predictions
    test = np.zeros(test_x.shape[0])    # test predictions averaged over folds
    cv_scores = []

    for i, (train_index, valid_index) in enumerate(kf.split(train_x, train_y)):
        print('************************************ {} ************************************'.format(str(i+1)))
        trn_x, trn_y, val_x, val_y = train_x.iloc[train_index], train_y.iloc[train_index], train_x.iloc[valid_index], train_y.iloc[valid_index]

        if clf_name == "lgb":
            train_matrix = clf.Dataset(trn_x, label=trn_y)
            valid_matrix = clf.Dataset(val_x, label=val_y)

            params = {
                'boosting_type': 'gbdt',
                'objective': 'binary',
                'metric': 'auc',
                'min_child_weight': 5,
                'num_leaves': 2 ** 5,
                'lambda_l2': 10,
                'feature_fraction': 0.8,
                'bagging_fraction': 0.8,
                'bagging_freq': 4,
                'learning_rate': 0.1,
                'seed': 2020,
                'nthread': 28,
                'verbose': -1,
            }

            # note: in lightgbm >= 4, verbose_eval/early_stopping_rounds moved to
            # callbacks=[lgb.log_evaluation(200), lgb.early_stopping(200)]
            model = clf.train(params, train_matrix, 50000, valid_sets=[train_matrix, valid_matrix], verbose_eval=200, early_stopping_rounds=200)
            val_pred = model.predict(val_x, num_iteration=model.best_iteration)
            test_pred = model.predict(test_x, num_iteration=model.best_iteration)

            # print(list(sorted(zip(features, model.feature_importance("gain")), key=lambda x: x[1], reverse=True))[:20])

        if clf_name == "xgb":
            train_matrix = clf.DMatrix(trn_x, label=trn_y)
            valid_matrix = clf.DMatrix(val_x, label=val_y)

            params = {'booster': 'gbtree',
                      'objective': 'binary:logistic',
                      'eval_metric': 'auc',
                      'gamma': 1,
                      'min_child_weight': 1.5,
                      'max_depth': 5,
                      'lambda': 10,
                      'subsample': 0.7,
                      'colsample_bytree': 0.7,
                      'colsample_bylevel': 0.7,
                      'eta': 0.04,
                      'tree_method': 'exact',
                      'seed': 2020,
                      'nthread': 36,
                      }

            watchlist = [(train_matrix, 'train'), (valid_matrix, 'eval')]
            model = clf.train(params, train_matrix, num_boost_round=50000, evals=watchlist, verbose_eval=200, early_stopping_rounds=200)
            val_pred = model.predict(valid_matrix, ntree_limit=model.best_ntree_limit)
            # Booster.predict requires a DMatrix, so wrap the test frame
            test_pred = model.predict(clf.DMatrix(test_x), ntree_limit=model.best_ntree_limit)

        if clf_name == "cat":
            params = {'learning_rate': 0.05, 'depth': 5, 'l2_leaf_reg': 10, 'bootstrap_type': 'Bernoulli',
                      'od_type': 'Iter', 'od_wait': 50, 'random_seed': 11, 'allow_writing_files': False}

            model = clf(iterations=20000, **params)
            model.fit(trn_x, trn_y, eval_set=(val_x, val_y),
                      cat_features=[], use_best_model=True, verbose=500)

            # a regressor is used here, so predict() returns raw scores,
            # which is fine for a ranking metric such as AUC
            val_pred = model.predict(val_x)
            test_pred = model.predict(test_x)

        train[valid_index] = val_pred
        test += test_pred / kf.n_splits  # accumulate across folds, not overwrite
        cv_scores.append(roc_auc_score(val_y, val_pred))

        print(cv_scores)

    print("%s_score_list:" % clf_name, cv_scores)
    print("%s_score_mean:" % clf_name, np.mean(cv_scores))
    print("%s_score_std:" % clf_name, np.std(cv_scores))
    return train, test
def lgb_model(x_train, y_train, x_test):
    lgb_train, lgb_test = cv_model(lgb, x_train, y_train, x_test, "lgb")
    return lgb_train, lgb_test

def xgb_model(x_train, y_train, x_test):
    xgb_train, xgb_test = cv_model(xgb, x_train, y_train, x_test, "xgb")
    return xgb_train, xgb_test

def cat_model(x_train, y_train, x_test):
    cat_train, cat_test = cv_model(CatBoostRegressor, x_train, y_train, x_test, "cat")
    return cat_train, cat_test
lgb_train, lgb_test = lgb_model(x_train, y_train, x_test)
#lgb_score_mean: 0.7314354309481285
#lgb_score_std: 0.001859405045804381
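Since cv_model supports all three learners, a simple equal-weight average of their test predictions is a natural next step; a sketch (the weights are arbitrary and not tuned):

xgb_train, xgb_test = xgb_model(x_train, y_train, x_test)
cat_train, cat_test = cat_model(x_train, y_train, x_test)
blend_test = (lgb_test + xgb_test + cat_test) / 3  # equal-weight blend of the three models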