From 543a4c6571225e3b71e6d6573f2756434d6db219 Mon Sep 17 00:00:00 2001
From: syropiatovvv
Date: Sat, 1 Nov 2025 17:40:16 +0300
Subject: [PATCH] add a feature-filtering section using
 SequentialFeatureSelector to the research notebook
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 iis_project/mlxtend_utils/__init__.py          |   0
 .../mlxtend_utils/feature_selection.py         |   3 +
 ...rements-isolated-research-model.txt.unused  |   1 +
 requirements/requirements-research.txt         |   3 +-
 research/research.py                           | 228 +++++++++++++++---
 5 files changed, 196 insertions(+), 39 deletions(-)
 create mode 100644 iis_project/mlxtend_utils/__init__.py
 create mode 100644 iis_project/mlxtend_utils/feature_selection.py

diff --git a/iis_project/mlxtend_utils/__init__.py b/iis_project/mlxtend_utils/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/iis_project/mlxtend_utils/feature_selection.py b/iis_project/mlxtend_utils/feature_selection.py
new file mode 100644
index 0000000..782463b
--- /dev/null
+++ b/iis_project/mlxtend_utils/feature_selection.py
@@ -0,0 +1,3 @@
+SEQUENTIAL_FEATURE_SELECTOR_PARAMS_COMMON_INCLUDE = [
+    'k_features', 'forward', 'floating', 'scoring', 'cv', 'fixed_features', 'feature_groups',
+]
diff --git a/requirements/requirements-isolated-research-model.txt.unused b/requirements/requirements-isolated-research-model.txt.unused
index adc685d..2ec3b71 100644
--- a/requirements/requirements-isolated-research-model.txt.unused
+++ b/requirements/requirements-isolated-research-model.txt.unused
@@ -1 +1,2 @@
+mlxtend ~=0.23.4
 scikit-learn >=1.7.2,<2
diff --git a/requirements/requirements-research.txt b/requirements/requirements-research.txt
index b237e86..a7510fc 100644
--- a/requirements/requirements-research.txt
+++ b/requirements/requirements-research.txt
@@ -1,2 +1,3 @@
-mlflow>=2.16,<2.22
+mlflow >=2.16,<2.22
+mlxtend ~=0.23.4
 scikit-learn >=1.7.2,<2
diff --git a/research/research.py b/research/research.py
index 3d27fb6..f6dba6d 100644
--- a/research/research.py
+++ b/research/research.py
@@ -48,17 +48,23 @@ mlflow_baseline_run_name: str = 'Baseline model'
 # Name of the new MLflow run for the baseline model.
 mlflow_feateng_run_name: str = 'Model with engineered features'
 # Name of the new MLflow run for the model that uses engineered features
+mlflow_feateng_filtered_run_name: str = 'Model with filtered engineered features'
+# Name of the new MLflow run for the model that uses engineered features and feature filtering
 
 # %%
+from collections.abc import Sequence
 import os
 import pathlib
 import pickle
 import sys
 
 # %%
+import matplotlib
 import mlflow
 import mlflow.models
 import mlflow.sklearn
+import mlxtend.feature_selection
+import mlxtend.plotting
 import sklearn.compose
 import sklearn.ensemble
 import sklearn.metrics
@@ -74,6 +80,7 @@ CODE_PATH = BASE_PATH
 sys.path.insert(0, str(CODE_PATH.resolve()))
 
 # %%
+from iis_project.mlxtend_utils.feature_selection import SEQUENTIAL_FEATURE_SELECTOR_PARAMS_COMMON_INCLUDE
 from iis_project.sklearn_utils import filter_params
 from iis_project.sklearn_utils.compose import COLUMN_TRANSFORMER_PARAMS_COMMON_INCLUDE
 from iis_project.sklearn_utils.ensemble import RANDOM_FOREST_REGRESSOR_PARAMS_COMMON_EXCLUDE
@@ -99,6 +106,16 @@ DATA_PATH = (
     else (BASE_PATH / 'data')
 )
 
+
+# %%
+def build_sequential_feature_selector(*args, **kwargs):
+    return mlxtend.feature_selection.SequentialFeatureSelector(*args, **kwargs)
+
+def plot_sequential_feature_selection(feature_selector, *args_rest, **kwargs):
+    metric_dict = feature_selector.get_metric_dict()
+    return mlxtend.plotting.plot_sequential_feature_selection(metric_dict, *args_rest, **kwargs)
+
+
 # %% [markdown]
 # ## Loading and overview of the data
 
@@ -389,60 +406,65 @@ mlflow_log_model(
 features_to_extend_as_polynomial = ('selling_price', 'driven_kms')
 features_to_extend_as_spline = ('age',)
 
-# %%
-assert set(features_to_extend_as_polynomial) <= {*features_to_scale_to_standard_columns}
-assert set(features_to_extend_as_spline) <= {*features_to_scale_to_standard_columns}
-preprocess_transformer = sklearn.compose.ColumnTransformer(
-    [
-        (
-            'extend_features_as_polynomial',
-            sklearn.pipeline.Pipeline([
-                (
-                    'extend_features',
-                    sklearn.preprocessing.PolynomialFeatures(2, include_bias=False),
+# %%
+def build_preprocess_transformer():
+    assert set(features_to_extend_as_polynomial) <= {*features_to_scale_to_standard_columns}
+    assert set(features_to_extend_as_spline) <= {*features_to_scale_to_standard_columns}
+    return sklearn.compose.ColumnTransformer(
+        [
+            (
+                'extend_features_as_polynomial',
+                sklearn.pipeline.Pipeline([
+                    (
+                        'extend_features',
+                        sklearn.preprocessing.PolynomialFeatures(2, include_bias=False),
+                    ),
+                    ('scale_to_standard', build_features_scaler_standard()),
+                ]),
+                features_to_extend_as_polynomial,
+            ),
+            (
+                'extend_features_as_spline',
+                sklearn.preprocessing.SplineTransformer(
+                    4, knots='quantile', extrapolation='constant', include_bias=False,
                 ),
-                ('scale_to_standard', build_features_scaler_standard()),
-            ]),
-            features_to_extend_as_polynomial,
-        ),
-        (
-            'extend_features_as_spline',
-            sklearn.preprocessing.SplineTransformer(
-                4, knots='quantile', extrapolation='constant', include_bias=False,
+                features_to_extend_as_spline,
             ),
-            features_to_extend_as_spline,
-        ),
-        (
-            'scale_to_standard',
-            build_features_scaler_standard(),
-            tuple(filter(lambda f: f not in features_to_extend_as_polynomial, features_to_scale_to_standard_columns)),
-        ),
-        (
-            'encode_categoricals_wrt_target',
-            build_categorical_features_encoder_target(random_state=0x2ED6),
-            features_to_encode_wrt_target_columns,
-        ),
-    ],
-    remainder='drop',
-)
+            (
+                'scale_to_standard',
+                build_features_scaler_standard(),
+                tuple(filter(lambda f: f not in features_to_extend_as_polynomial, features_to_scale_to_standard_columns)),
+            ),
+            (
+                'encode_categoricals_wrt_target',
+                build_categorical_features_encoder_target(random_state=0x2ED6),
+                features_to_encode_wrt_target_columns,
+            ),
+        ],
+        remainder='drop',
+    )
+
+preprocess_transformer = build_preprocess_transformer()
 preprocess_transformer
 
 # %% [markdown]
 # Demonstration of the data preprocessing:
 
 # %%
-df_tfd_features_matrix_test = preprocess_transformer.fit_transform(df_orig_features_test, df_target_test.iloc[:, 0])
-df_tfd_features_test = pandas_dataframe_from_transformed_artifacts(df_tfd_features_matrix_test, preprocess_transformer)
+preprocess_transformer_tmp = build_preprocess_transformer()
+df_augd_features_matrix_train = preprocess_transformer_tmp.fit_transform(df_orig_features_train, df_target_train.iloc[:, 0])
+df_augd_features_train = pandas_dataframe_from_transformed_artifacts(df_augd_features_matrix_train, preprocess_transformer_tmp)
+del preprocess_transformer_tmp
 
 # %% [markdown]
 # Overview of the preprocessed dataset:
 
 # %%
-df_tfd_features_test.info()
+df_augd_features_train.info()
 
 # %%
-df_tfd_features_test.head(0x8)
+df_augd_features_train.head(0x8)
 
 # %%
 regressor = build_regressor(random_state=0x3AEF)
@@ -524,4 +546,134 @@ mlflow_log_model(
     ),
 )
 
+# %% [markdown]
+# ### Model with engineered and filtered features
+
+# %%
+regressor = build_regressor(random_state=0x8EDD)
+regressor
+
+# %% [markdown]
+# Selecting features from the extended feature set by minimizing MAPE:
+
+# %%
+len(df_augd_features_train.columns)
+
+# %%
+FILTERED_FEATURES_NUM = (4, 8)
+
+# %%
+feature_selector = build_sequential_feature_selector(
+    regressor, k_features=FILTERED_FEATURES_NUM, forward=True, floating=True, cv=4, scoring='neg_mean_absolute_percentage_error',
+    verbose=1,
+)
+feature_selector
+
+# %%
+_ = feature_selector.fit(df_augd_features_train, df_target_train.iloc[:, 0])
+
+# %% [markdown]
+# Names of the selected features:
+
+# %%
+feature_selector.k_feature_names_
+
+# %% [markdown]
+# MAPE versus the number of selected features (the selection region bounded by `FILTERED_FEATURES_NUM` is highlighted):
+
+# %%
+fig, ax = plot_sequential_feature_selection(feature_selector, kind='std_dev')
+ax.grid(True)
+if isinstance(FILTERED_FEATURES_NUM, Sequence):
+    _ = ax.axvspan(min(FILTERED_FEATURES_NUM), max(FILTERED_FEATURES_NUM), color=matplotlib.colormaps.get_cmap('tab10')(6), alpha=0.15)
+# it would be nice to set the upper limit to `len(df_augd_features_train.columns)`, but SequentialFeatureSelector does not evaluate that far
+_ = ax.set_xlim((1, (max(FILTERED_FEATURES_NUM) if isinstance(FILTERED_FEATURES_NUM, Sequence) else FILTERED_FEATURES_NUM)))
+_ = ax.set_ylim((None, 0.))
+
+# %% [markdown]
+# The composite pipeline:
+
+# %%
+pipeline = sklearn.pipeline.Pipeline([
+    ('preprocess', preprocess_transformer),
+    ('select_features', feature_selector),
+    ('regress', regressor),
+])
+pipeline
+
+# %%
+model_params = filter_params(
+    pipeline.get_params(),
+    include={
+        'preprocess': (
+            False,
+            {
+                **{k: True for k in COLUMN_TRANSFORMER_PARAMS_COMMON_INCLUDE},
+                'extend_features_as_polynomial': {
+                    'extend_features': True,
+                    'scale_to_standard': True,
+                },
+                'extend_features_as_spline': True,
+                'scale_to_standard': True,
+                'encode_categoricals_wrt_target': True,
+            },
+        ),
+        'select_features': (
+            False,
+            {
+                **{k: True for k in SEQUENTIAL_FEATURE_SELECTOR_PARAMS_COMMON_INCLUDE},
+                'estimator': False,
+            },
+        ),
+        'regress': (False, True),
+    },
+    exclude={
+        'preprocess': {
+            'extend_features_as_polynomial': {
+                'scale_to_standard': STANDARD_SCALER_PARAMS_COMMON_EXCLUDE,
+            },
+            'scale_to_standard': STANDARD_SCALER_PARAMS_COMMON_EXCLUDE,
+        },
+        'select_features': (),  # TODO: not great
+        'regress': RANDOM_FOREST_REGRESSOR_PARAMS_COMMON_EXCLUDE,
+    },
+)
+model_params
+
+# %% [markdown]
+# Training the model:
+
+# %%
+# XXX: the SequentialFeatureSelector gets fitted all over again!?
+_ = pipeline.fit(df_orig_features_train, df_target_train.iloc[:, 0])
+
+# %% [markdown]
+# Quality evaluation:
+
+# %%
+target_test_predicted = pipeline.predict(df_orig_features_test)
+
+# %% [markdown]
+# Quality metrics (MAPE, as well as MSE and MAE):
+
+# %%
+metrics = score_predictions(df_target_test, target_test_predicted)
+metrics
+
+# %%
+mlflow_log_model(
+    pipeline,
+    model_params=model_params,
+    metrics={k: float(v) for k, v in metrics.items()},
+    run_name=mlflow_feateng_filtered_run_name,
+    model_signature=mlflow_model_signature,
+    input_example=df_orig_features.head(MODEL_INOUT_EXAMPLE_SIZE),
+    #pip_requirements=str(MODEL_PIP_REQUIREMENTS_PATH),
+    comment_file_path=(
+        model_comment_path
+        if model_comment_path is not None
+        else (BASE_PATH / 'research' / model_comment_relpath)
+    ),
+)
+
 # %%
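
For reference, a minimal, self-contained sketch of the technique this patch wires into research.py: mlxtend's SequentialFeatureSelector used as a step of an sklearn Pipeline. The data, column names, and hyperparameter values below are illustrative assumptions, not taken from the project.

# Sketch: sequential forward floating selection inside an sklearn Pipeline.
import numpy as np
import pandas as pd
import mlxtend.feature_selection
import sklearn.ensemble
import sklearn.pipeline

# Synthetic data purely for illustration.
rng = np.random.default_rng(0)
features = pd.DataFrame(rng.normal(size=(200, 10)), columns=[f'f{i}' for i in range(10)])
target = 2 * features['f0'] - features['f3'] + rng.normal(scale=0.1, size=200)

regressor = sklearn.ensemble.RandomForestRegressor(n_estimators=50, random_state=0)
feature_selector = mlxtend.feature_selection.SequentialFeatureSelector(
    regressor,
    k_features=(4, 8),            # search a range of subset sizes, as FILTERED_FEATURES_NUM does
    forward=True, floating=True,  # sequential forward floating selection
    scoring='neg_mean_absolute_percentage_error',
    cv=4,
)
pipeline = sklearn.pipeline.Pipeline([
    ('select_features', feature_selector),
    ('regress', regressor),
])
# Note: fitting the pipeline refits the selector from scratch, which is what the
# "XXX" comment in the patch points out.
_ = pipeline.fit(features, target)
print(pipeline.named_steps['select_features'].k_feature_names_)

In the notebook itself the selector is additionally fitted standalone first, so that its get_metric_dict() output can be plotted before the full pipeline is trained.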