add a section on feature filtering with SequentialFeatureSelector to the research notebook

lab_2/master
syropiatovvv 1 day ago
Parent ce02a6966b
Commit 543a4c6571
Signed by: syropiatovvv
GPG key ID: 297380B8143A31BD

@@ -0,0 +1,3 @@
SEQUENTIAL_FEATURE_SELECTOR_PARAMS_COMMON_INCLUDE = [
'k_features', 'forward', 'floating', 'scoring', 'cv', 'fixed_features', 'feature_groups',
]
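Illustrative usage (a minimal sketch, not project code; `feature_selector` stands for any constructed mlxtend.feature_selection.SequentialFeatureSelector): the constant narrows `get_params()` down to the parameters worth reporting.

selector_params_to_log = {
    # keep only the commonly reported constructor parameters
    name: value
    for name, value in feature_selector.get_params().items()
    if name in SEQUENTIAL_FEATURE_SELECTOR_PARAMS_COMMON_INCLUDE
}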

@@ -1,2 +1,3 @@
mlflow>=2.16,<2.22
mlflow >=2.16,<2.22
mlxtend ~=0.23.4
scikit-learn >=1.7.2,<2

@@ -48,17 +48,23 @@ mlflow_baseline_run_name: str = 'Baseline model'
# Name of the new MLflow run for the baseline model.
mlflow_feateng_run_name: str = 'Model with engineered features'
# Name of the new MLflow run for the model that uses engineered features
mlflow_feateng_filtered_run_name: str = 'Model with filtered engineered features'
# Name of the new MLflow run for the model that uses engineered features with feature filtering
# %%
from collections.abc import Sequence
import os
import pathlib
import pickle
import sys
# %%
import matplotlib
import mlflow
import mlflow.models
import mlflow.sklearn
import mlxtend.feature_selection
import mlxtend.plotting
import sklearn.compose
import sklearn.ensemble
import sklearn.metrics
@@ -74,6 +80,7 @@ CODE_PATH = BASE_PATH
sys.path.insert(0, str(CODE_PATH.resolve()))
# %%
from iis_project.mlxtend_utils.feature_selection import SEQUENTIAL_FEATURE_SELECTOR_PARAMS_COMMON_INCLUDE
from iis_project.sklearn_utils import filter_params
from iis_project.sklearn_utils.compose import COLUMN_TRANSFORMER_PARAMS_COMMON_INCLUDE
from iis_project.sklearn_utils.ensemble import RANDOM_FOREST_REGRESSOR_PARAMS_COMMON_EXCLUDE
@@ -99,6 +106,16 @@ DATA_PATH = (
else (BASE_PATH / 'data')
)
# %%
def build_sequential_feature_selector(*args, **kwargs):
    # Thin wrapper keeping SequentialFeatureSelector construction in one place.
    return mlxtend.feature_selection.SequentialFeatureSelector(*args, **kwargs)
def plot_sequential_feature_selection(feature_selector, *args_rest, **kwargs):
    # Plot the fitted selector's CV results (one point per feature-subset size).
    metric_dict = feature_selector.get_metric_dict()
    return mlxtend.plotting.plot_sequential_feature_selection(metric_dict, *args_rest, **kwargs)
# %% [markdown]
# ## Data loading and overview
@@ -389,60 +406,65 @@ mlflow_log_model(
features_to_extend_as_polynomial = ('selling_price', 'driven_kms')
features_to_extend_as_spline = ('age',)
# %%
assert set(features_to_extend_as_polynomial) <= {*features_to_scale_to_standard_columns}
assert set(features_to_extend_as_spline) <= {*features_to_scale_to_standard_columns}
preprocess_transformer = sklearn.compose.ColumnTransformer(
[
(
'extend_features_as_polynomial',
sklearn.pipeline.Pipeline([
(
'extend_features',
sklearn.preprocessing.PolynomialFeatures(2, include_bias=False),
# %%
def build_preprocess_transformer():
assert set(features_to_extend_as_polynomial) <= {*features_to_scale_to_standard_columns}
assert set(features_to_extend_as_spline) <= {*features_to_scale_to_standard_columns}
return sklearn.compose.ColumnTransformer(
[
(
'extend_features_as_polynomial',
sklearn.pipeline.Pipeline([
(
'extend_features',
sklearn.preprocessing.PolynomialFeatures(2, include_bias=False),
),
('scale_to_standard', build_features_scaler_standard()),
]),
features_to_extend_as_polynomial,
),
(
'extend_features_as_spline',
sklearn.preprocessing.SplineTransformer(
4, knots='quantile', extrapolation='constant', include_bias=False,
),
('scale_to_standard', build_features_scaler_standard()),
]),
features_to_extend_as_polynomial,
),
(
'extend_features_as_spline',
sklearn.preprocessing.SplineTransformer(
4, knots='quantile', extrapolation='constant', include_bias=False,
features_to_extend_as_spline,
),
features_to_extend_as_spline,
),
(
'scale_to_standard',
build_features_scaler_standard(),
tuple(filter(lambda f: f not in features_to_extend_as_polynomial, features_to_scale_to_standard_columns)),
),
(
'encode_categoricals_wrt_target',
build_categorical_features_encoder_target(random_state=0x2ED6),
features_to_encode_wrt_target_columns,
),
],
remainder='drop',
)
(
'scale_to_standard',
build_features_scaler_standard(),
tuple(filter(lambda f: f not in features_to_extend_as_polynomial, features_to_scale_to_standard_columns)),
),
(
'encode_categoricals_wrt_target',
build_categorical_features_encoder_target(random_state=0x2ED6),
features_to_encode_wrt_target_columns,
),
],
remainder='drop',
)
preprocess_transformer = build_preprocess_transformer()
preprocess_transformer
# %% [markdown]
# Data preprocessing demonstration:
# %%
df_tfd_features_matrix_test = preprocess_transformer.fit_transform(df_orig_features_test, df_target_test.iloc[:, 0])
df_tfd_features_test = pandas_dataframe_from_transformed_artifacts(df_tfd_features_matrix_test, preprocess_transformer)
preprocess_transformer_tmp = build_preprocess_transformer()
df_augd_features_matrix_train = preprocess_transformer_tmp.fit_transform(df_orig_features_train, df_target_train.iloc[:, 0])
df_augd_features_train = pandas_dataframe_from_transformed_artifacts(df_augd_features_matrix_train, preprocess_transformer_tmp)
del preprocess_transformer_tmp
# %% [markdown]
# Overview of the preprocessed dataset:
# %%
df_tfd_features_test.info()
df_augd_features_train.info()
# %%
df_tfd_features_test.head(0x8)
df_augd_features_train.head(0x8)
# %%
regressor = build_regressor(random_state=0x3AEF)
@@ -524,4 +546,134 @@ mlflow_log_model(
),
)
# %% [markdown]
# ### Model with engineered and filtered features
# %%
regressor = build_regressor(random_state=0x8EDD)
regressor
# %% [markdown]
# Selecting features from the extended set by minimizing MAPE:
# %%
len(df_augd_features_train.columns)
# %%
FILTERED_FEATURES_NUM = (4, 8)
# %%
feature_selector = build_sequential_feature_selector(
regressor, k_features=FILTERED_FEATURES_NUM, forward=True, floating=True, cv=4, scoring='neg_mean_absolute_percentage_error',
verbose=1,
)
feature_selector
# %%
_ = feature_selector.fit(df_augd_features_train, df_target_train.iloc[:, 0])
# %% [markdown]
# Names of the selected features:
# %%
feature_selector.k_feature_names_
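# %% [markdown]
# An illustrative aside (not used further below): because `k_features` was given as a range,
# mlxtend keeps the best-scoring subset size within it; its mean CV score (the negated MAPE
# here) is available as `k_score_`, and `get_metric_dict()` can be tabulated for a
# per-subset-size overview. `pandas` is imported locally just to keep the sketch self-contained.
# %%
feature_selector.k_score_
# %%
import pandas

pandas.DataFrame.from_dict(feature_selector.get_metric_dict()).T[['feature_names', 'avg_score', 'std_dev']]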
# %% [markdown]
# MAPE as a function of the number of selected features (the selection region bounded by `FILTERED_FEATURES_NUM` is highlighted):
# %%
fig, ax = plot_sequential_feature_selection(feature_selector, kind='std_dev')
ax.grid(True)
if isinstance(FILTERED_FEATURES_NUM, Sequence):
_ = ax.axvspan(min(FILTERED_FEATURES_NUM), max(FILTERED_FEATURES_NUM), color=matplotlib.colormaps.get_cmap('tab10')(6), alpha=0.15)
# the upper bound would ideally be `len(df_augd_features_train.columns)`, but SequentialFeatureSelector does not evaluate that far
_ = ax.set_xlim((1, (max(FILTERED_FEATURES_NUM) if isinstance(FILTERED_FEATURES_NUM, Sequence) else FILTERED_FEATURES_NUM)))
_ = ax.set_ylim((None, 0.))
# %% [markdown]
# Composite pipeline:
# %%
pipeline = sklearn.pipeline.Pipeline([
('preprocess', preprocess_transformer),
('select_features', feature_selector),
('regress', regressor),
])
pipeline
# %%
model_params = filter_params(
pipeline.get_params(),
include={
'preprocess': (
False,
{
**{k: True for k in COLUMN_TRANSFORMER_PARAMS_COMMON_INCLUDE},
'extend_features_as_polynomial': {
'extend_features': True,
'scale_to_standard': True,
},
'extend_features_as_spline': True,
'scale_to_standard': True,
                'encode_categoricals_wrt_target': True,
},
),
'select_features': (
False,
{
**{k: True for k in SEQUENTIAL_FEATURE_SELECTOR_PARAMS_COMMON_INCLUDE},
'estimator': False,
},
),
'regress': (False, True),
},
exclude={
'preprocess': {
'extend_features_as_polynomial': {
'scale_to_standard': STANDARD_SCALER_PARAMS_COMMON_EXCLUDE,
},
'scale_to_standard': STANDARD_SCALER_PARAMS_COMMON_EXCLUDE,
},
        'select_features': (), # TODO: uh-oh
'regress': RANDOM_FOREST_REGRESSOR_PARAMS_COMMON_EXCLUDE,
},
)
model_params
# %% [markdown]
# Model training:
# %%
# XXX: SequentialFeatureSelector gets fitted again!?
_ = pipeline.fit(df_orig_features_train, df_target_train.iloc[:, 0])
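# %% [markdown]
# A possible workaround, sketched only (the names `pick_selected_features` and
# `pipeline_without_refit` are illustrative): reuse the already-fitted selection as a cheap
# positional column picker instead of letting the pipeline refit SequentialFeatureSelector
# from scratch.
# %%
pick_selected_features = sklearn.preprocessing.FunctionTransformer(
    # keep only the columns chosen by the fitted selector (by position in the preprocessed matrix)
    lambda X, idx=list(feature_selector.k_feature_idx_): X[:, idx],
)
pipeline_without_refit = sklearn.pipeline.Pipeline([
    ('preprocess', build_preprocess_transformer()),
    ('select_features', pick_selected_features),
    ('regress', build_regressor(random_state=0x8EDD)),
])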
# %% [markdown]
# Quality assessment:
# %%
target_test_predicted = pipeline.predict(df_orig_features_test)
# %% [markdown]
# Quality metrics (MAPE, as well as MSE and MAE):
# %%
metrics = score_predictions(df_target_test, target_test_predicted)
metrics
# %%
mlflow_log_model(
pipeline,
model_params=model_params,
metrics={k: float(v) for k, v in metrics.items()},
run_name=mlflow_feateng_filtered_run_name,
model_signature=mlflow_model_signature,
input_example=df_orig_features.head(MODEL_INOUT_EXAMPLE_SIZE),
#pip_requirements=str(MODEL_PIP_REQUIREMENTS_PATH),
comment_file_path=(
model_comment_path
if model_comment_path is not None
else (BASE_PATH / 'research' / model_comment_relpath)
),
)
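# %% [markdown]
# For reference, a minimal sketch of loading the logged pipeline back (illustrative;
# `logged_model_uri` is a placeholder such as 'runs:/<run_id>/<artifact path>' for the run
# created above):
# %%
# loaded_pipeline = mlflow.sklearn.load_model(logged_model_uri)
# loaded_pipeline.predict(df_orig_features_test.head(MODEL_INOUT_EXAMPLE_SIZE))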
# %%
