Task 3 - Modeling (2 days)
Build models with logistic regression, SVM, decision tree, random forest, and XGBoost. Any scoring metric may be used, e.g. accuracy. (Hyperparameter tuning is not required.)
Time: 2 days
My solution
import pandas as pd
import warnings
from sklearn.preprocessing import scale
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from xgboost.sklearn import XGBClassifier
# Data preprocessing carried over from the previous tasks
data = pd.read_csv('data.csv', encoding='gbk')
data_clean = data.drop_duplicates()
drop_columns = ['Unnamed: 0', 'custid', 'trade_no', 'bank_card_no', 'source',
                'id_name', 'latest_query_time', 'loans_latest_time']
# also drop columns that contain only a single unique value
for data_col in data.columns:
    if len(data[data_col].unique()) == 1 and data_col not in drop_columns:
        drop_columns.append(data_col)
data_clean = data_clean.drop(drop_columns, axis=1)
# one-hot encode the categorical column
data_clean = pd.get_dummies(data_clean, columns=['reg_preference_for_trad'])
# fill missing student_feature values with 0
data_clean['student_feature'].fillna(0, inplace=True)
data_cols = data_clean.columns.values
for data_col in data_cols:
    # fill any remaining missing values with the column's mode
    fill_value = data_clean[data_col].value_counts().index[0]
    data_clean[data_col].fillna(fill_value, inplace=True)
# target label
df_y = data_clean['status']
# feature matrix (drop the label column)
df_X = data_clean.drop(columns=['status'])
# standardize the features (zero mean, unit variance per column)
df_X = scale(df_X, axis=0)
# Build the models (fixed random_state for reproducibility)
lr = LogisticRegression(random_state=2018, tol=1e-6)                   # logistic regression
tree = DecisionTreeClassifier(random_state=2018)                       # decision tree
svm = SVC(probability=True, random_state=2018, tol=1e-6)               # SVM
forest = RandomForestClassifier(n_estimators=100, random_state=2018)   # random forest
Gbdt = GradientBoostingClassifier(random_state=2018)                   # GBDT
Xgbc = XGBClassifier(random_state=2018)                                # XGBoost
# Evaluation: 5-fold cross-validation on several metrics
def muti_score(model):
    warnings.filterwarnings('ignore')
    accuracy = cross_val_score(model, df_X, df_y, scoring='accuracy', cv=5)
    precision = cross_val_score(model, df_X, df_y, scoring='precision', cv=5)
    recall = cross_val_score(model, df_X, df_y, scoring='recall', cv=5)
    f1_score = cross_val_score(model, df_X, df_y, scoring='f1', cv=5)
    auc = cross_val_score(model, df_X, df_y, scoring='roc_auc', cv=5)
    print("Accuracy:", accuracy.mean())
    print("Precision:", precision.mean())
    print("Recall:", recall.mean())
    print("F1_score:", f1_score.mean())
    print("AUC:", auc.mean())
models = {"lr": lr, "tree": tree, "svm": svm, "forest": forest, "Gbdt": Gbdt, "Xgbc": Xgbc}
for name, model in models.items():
    print(name)
    muti_score(model)
Output:
lr
Accuracy: 0.790281386338364
Precision: 0.6593920530150161
Recall: 0.3394817341162406
F1_score: 0.448020547401347
AUC: 0.7844017218172162
tree
Accuracy: 0.6855215864861635
Precision: 0.3843921937868717
Recall: 0.4199571041805844
F1_score: 0.40104848437486407
AUC: 0.5972291706193749
svm
Accuracy: 0.7858656443483919
Precision: 0.7316569883211543
Recall: 0.23137372103653178
F1_score: 0.35083219482550787
AUC: 0.7648360306702487
forest
Accuracy: 0.7913335761002059
Precision: 0.7034687227673737
Recall: 0.28838648430083336
F1_score: 0.4078894217688266
AUC: 0.77971546689502
Gbdt
Accuracy: 0.7955410050455514
Precision: 0.6673074901039089
Recall: 0.36884427411131815
F1_score: 0.4746696021847946
AUC: 0.789352848120004
Xgbc
Accuracy: 0.7959609498788722
Precision: 0.6740591443898865
Recall: 0.3621215850356879
F1_score: 0.47097961303418573
AUC: 0.7906816486380361
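From these numbers, Xgbc and Gbdt give the highest accuracy and AUC, while recall is low for every model. As a side note, calling cross_val_score once per metric re-runs the full cross-validation five times for each model; sklearn's cross_validate can score all five metrics in a single pass and makes a side-by-side comparison table easy to build. A minimal sketch (not part of the original run), assuming the models and df_X, df_y defined above:

from sklearn.model_selection import cross_validate

scoring = ['accuracy', 'precision', 'recall', 'f1', 'roc_auc']
models = {"lr": lr, "tree": tree, "svm": svm, "forest": forest, "Gbdt": Gbdt, "Xgbc": Xgbc}
rows = {}
for name, model in models.items():
    # one 5-fold CV pass per model, scoring all five metrics at once
    cv_results = cross_validate(model, df_X, df_y, scoring=scoring, cv=5)
    # fold scores are returned under 'test_<metric>' keys; keep the mean of each
    rows[name] = {metric: cv_results['test_' + metric].mean() for metric in scoring}
summary = pd.DataFrame(rows).T  # one row per model, one column per metric
print(summary.round(4))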