23. Combining predictors using stacking


import numpy as np

from sklearn.datasets import fetch_openml
from sklearn.utils import shuffle


def load_ames_housing():
    # Fetch the Ames housing data from OpenML and keep a subset of 20 features.
    df = fetch_openml(name="house_prices", as_frame=True)
    X = df.data
    y = df.target

    features = ['YrSold', 'HeatingQC', 'Street', 'YearRemodAdd', 'Heating',
                'MasVnrType', 'BsmtUnfSF', 'Foundation', 'MasVnrArea',
                'MSSubClass', 'ExterQual', 'Condition2', 'GarageCars',
                'GarageType', 'OverallQual', 'TotalBsmtSF', 'BsmtFinSF1',
                'HouseStyle', 'MiscFeature', 'MoSold']
    X = X[features]

    # Shuffle and keep 600 rows to speed up the example; the target is the
    # log of the sale price.
    X, y = shuffle(X, y, random_state=0)
    X = X[:600]
    y = y[:600]
    return X, np.log(y)


X, y = load_ames_housing()
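A quick look at the loaded data (an optional check, not part of the original example) confirms the 600-row subsample and the log-transformed target:

# Optional sanity check (not in the original example).
print(X.shape)        # (600, 20)
print(y.describe())   # summary statistics of log(SalePrice)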

from sklearn.compose import make_column_transformer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import OrdinalEncoder
from sklearn.preprocessing import StandardScaler

cat_cols = X.columns[X.dtypes == 'O']
num_cols = X.columns[X.dtypes == 'float64']

categories = [
    X[column].unique() for column in X[cat_cols]]

# Replace missing categories (None) with an explicit 'missing' level so the
# encoders below know about it.
for cat in categories:
    cat[cat == None] = 'missing'  # noqa

# Categorical and numerical preprocessing for the tree-based (non-linear) models.
cat_proc_nlin = make_pipeline(
    SimpleImputer(missing_values=None, strategy='constant',
                  fill_value='missing'),
    OrdinalEncoder(categories=categories)
)

num_proc_nlin = make_pipeline(SimpleImputer(strategy='mean'))

# Categorical and numerical preprocessing for the linear model.
cat_proc_lin = make_pipeline(
    SimpleImputer(missing_values=None,
                  strategy='constant',
                  fill_value='missing'),
    OneHotEncoder(categories=categories)
)

num_proc_lin = make_pipeline(
    SimpleImputer(strategy='mean'),
    StandardScaler()
)

# transformation to use for non-linear estimators
processor_nlin = make_column_transformer(
    (cat_proc_nlin, cat_cols),
    (num_proc_nlin, num_cols),
    remainder='passthrough')

# transformation to use for linear estimators
processor_lin = make_column_transformer(
    (cat_proc_lin, cat_cols),
    (num_proc_lin, num_cols),
    remainder='passthrough')
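Optionally (this check is not in the original example), the two preprocessors can be fit on their own to see that they encode the categorical columns differently:

# Optional check (not in the original example): the ordinal encoder keeps one
# column per categorical feature, while the one-hot encoder expands each of
# them, so the two outputs have different widths.
print(processor_nlin.fit_transform(X).shape)
print(processor_lin.fit_transform(X).shape)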

from sklearn.experimental import enable_hist_gradient_boosting  # noqa: only needed for scikit-learn < 1.0
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import StackingRegressor
from sklearn.linear_model import LassoCV
from sklearn.linear_model import RidgeCV

lasso_pipeline = make_pipeline(processor_lin,
                               LassoCV())

rf_pipeline = make_pipeline(processor_nlin,
                            RandomForestRegressor(random_state=42))

gradient_pipeline = make_pipeline(
    processor_nlin,
    HistGradientBoostingRegressor(random_state=0))

estimators = [('Random Forest', rf_pipeline),
              ('Lasso', lasso_pipeline),
              ('Gradient Boosting', gradient_pipeline)]

# Stack the three pipelines; a RidgeCV meta-model combines their predictions.
stacking_regressor = StackingRegressor(estimators=estimators,
                                       final_estimator=RidgeCV())
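Before the full cross-validated comparison below, a simple hold-out evaluation (not part of the original example; the split and random_state are arbitrary) shows that the stacked model is used like any other scikit-learn estimator:

# Quick hold-out check (not in the original example); split parameters are arbitrary.
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
stacking_regressor.fit(X_train, y_train)
print('Hold-out R^2: {:.3f}'.format(stacking_regressor.score(X_test, y_test)))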

import time

import matplotlib.pyplot as plt
from sklearn.model_selection import cross_validate, cross_val_predict


def plot_regression_results(ax, y_true, y_pred, title, scores, elapsed_time):
    """Scatter plot of the predicted vs true targets."""
    ax.plot([y_true.min(), y_true.max()],
            [y_true.min(), y_true.max()],
            '--r', linewidth=2)
    ax.scatter(y_true, y_pred, alpha=0.2)
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
    ax.spines['left'].set_position(('outward', 10))
    ax.spines['bottom'].set_position(('outward', 10))
    ax.set_xlim([y_true.min(), y_true.max()])
    ax.set_ylim([y_true.min(), y_true.max()])
    ax.set_xlabel('Measured')
    ax.set_ylabel('Predicted')
    # Invisible rectangle used as a legend handle so only the score text is shown.
    extra = plt.Rectangle((0, 0), 0, 0, fc="w", fill=False,
                          edgecolor='none', linewidth=0)
    ax.legend([extra], [scores], loc='upper left')
    title = title + '\n Evaluation in {:.2f} seconds'.format(elapsed_time)
    ax.set_title(title)

fig, axs = plt.subplots(2, 2, figsize=(9, 7))
axs = np.ravel(axs)

for ax, (name, est) in zip(axs, estimators + [('Stacking Regressor',
                                               stacking_regressor)]):
    start_time = time.time()
    # Cross-validated R^2 and MAE for each pipeline and for the stacked model.
    score = cross_validate(est, X, y,
                           scoring=['r2', 'neg_mean_absolute_error'],
                           n_jobs=-1, verbose=0)
    elapsed_time = time.time() - start_time

    y_pred = cross_val_predict(est, X, y, n_jobs=-1, verbose=0)

    plot_regression_results(
        ax, y, y_pred,
        name,
        (r'$R^2={:.2f} \pm {:.2f}$' + '\n' + r'$MAE={:.2f} \pm {:.2f}$')
        .format(np.mean(score['test_r2']),
                np.std(score['test_r2']),
                -np.mean(score['test_neg_mean_absolute_error']),
                np.std(score['test_neg_mean_absolute_error'])),
        elapsed_time)

plt.suptitle('Comparison of single predictors versus the stacked predictor')
plt.tight_layout()
plt.subplots_adjust(top=0.9)
plt.show()

