Below is an example of using XGBoost inside a scikit-learn Pipeline to obtain feature importances:
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from xgboost import XGBClassifier

# Numerical features: median imputation, then outlier-robust scaling.
num_transformer = Pipeline(steps=[
    ('imputer', SimpleImputer(strategy='median')),
    ('scaler', preprocessing.RobustScaler())])

# Categorical features: mode imputation, then one-hot encoding.
# sparse_output requires scikit-learn >= 1.2; on older versions use sparse=False.
cat_transformer = Pipeline(steps=[
    ('imputer', SimpleImputer(strategy='most_frequent')),
    ('onehot', preprocessing.OneHotEncoder(categories='auto',
                                           sparse_output=False,
                                           handle_unknown='ignore'))])

# Route each group of columns to its transformer.
numerical_columns = X.columns[X.dtypes != 'category'].tolist()
categorical_columns = X.columns[X.dtypes == 'category'].tolist()
pipeline_procesado = ColumnTransformer(transformers=[
    ('numerical_preprocessing', num_transformer, numerical_columns),
    ('categorical_preprocessing', cat_transformer, categorical_columns)],
    remainder='passthrough',
    verbose=True)

classifier = XGBClassifier()
pipeline = Pipeline([("transform_inputs", pipeline_procesado),
                     ("classifier", classifier)])
pipeline.fit(X_train, y_train)

# Recover the expanded one-hot column names from the fitted encoder
# (get_feature_names_out replaces get_feature_names in scikit-learn >= 1.0).
onehot_columns = (pipeline.named_steps['transform_inputs']
                  .named_transformers_['categorical_preprocessing']
                  .named_steps['onehot']
                  .get_feature_names_out(categorical_columns))

# Reuse the already-fitted preprocessor instead of refitting it.
X_values = pipeline.named_steps['transform_inputs'].transform(X_train)
df_from_array_pipeline = pd.DataFrame(
    X_values, columns=numerical_columns + list(onehot_columns))

# Pair each transformed column with the importance XGBoost assigned to it.
feature_importance = pd.Series(
    data=pipeline.named_steps['classifier'].feature_importances_,
    index=np.array(numerical_columns + list(onehot_columns)))
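To actually read off the most influential features, the Series can be sorted. The sketch below is illustrative only: the top-10 cutoff is an arbitrary display choice, not part of the pipeline above.

# Sort descending so the most influential transformed columns come first;
# the head(10) cutoff is an arbitrary choice for display.
top_features = feature_importance.sort_values(ascending=False).head(10)
print(top_features)

Note that feature_importances_ follows the estimator's importance_type setting; for tree boosters XGBoost also supports other definitions such as 'weight', 'gain', and 'cover' (e.g. XGBClassifier(importance_type='gain')), which can rank features quite differently.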