# ---
# jupyter:
#   jupytext:
#     formats: py:percent,ipynb
#     text_representation:
#       extension: .py
#       format_name: percent
#       format_version: '1.3'
#       jupytext_version: 1.17.3
#   kernelspec:
#     display_name: python3_venv
#     language: python
#     name: python3_venv
# ---

# %% [markdown]
# # Exploring and tuning a predictive model for used car prices

# %% [markdown]
# The notebook uses the augmented used-cars dataset file produced by the `eda/cars_eda.py` notebook. See the papermill notebook parameters below.

# %%
from typing import Optional

# %% tags=["parameters"]
data_aug_pickle_path: Optional[str] = None
# Full path to the pickle file with the cleaned (augmented) dataset. If not set, `data/<data_aug_pickle_relpath>` is used.
data_aug_pickle_relpath: str = 'cars.aug.pickle'
# Path to the pickle file with the cleaned dataset, relative to the data directory `data`. Ignored if `data_aug_pickle_path` is set.

model_comment_path: Optional[str] = None
# Full path to a text file with an arbitrary comment to be stored in MLFlow as an artifact alongside the model. If not set, `research/<model_comment_relpath>` is used.
model_comment_relpath: str = 'comment.txt'
# Path to a text file with an arbitrary comment to be stored in MLFlow as an artifact alongside the model, relative to the `research` directory. Ignored if `model_comment_path` is set.

mlflow_tracking_server_uri: str = 'http://localhost:5000'
# URL of the MLFlow tracking server.
mlflow_registry_uri: Optional[str] = None
# URL of the MLFlow registry server (if not set, `mlflow_tracking_server_uri` is used).

mlflow_do_log: bool = False
# Whether to log runs to MLFlow.
mlflow_experiment_id: Optional[str] = None
# MLFlow experiment ID; takes precedence over `mlflow_experiment_name`.
mlflow_experiment_name: Optional[str] = 'Current price prediction for used cars'
# MLFlow experiment name (lower priority than `mlflow_experiment_id`).
mlflow_baseline_run_name: str = 'Baseline model'
# Name of the new MLFlow run for the baseline model.
mlflow_feateng_run_name: str = 'Model with engineered features'
# Name of the new MLFlow run for the model using engineered features.
mlflow_feateng_filtered_run_name: str = 'Model with filtered engineered features'
# Name of the new MLFlow run for the model using engineered features and feature selection.
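
# %% [markdown]
# For reference, a minimal sketch of running this notebook with parameter overrides through the papermill Python API; the notebook paths below are placeholders for the actual jupytext-paired `.ipynb` file:

# %% [raw] vscode={"languageId": "raw"}
# import papermill
#
# papermill.execute_notebook(
#     'research/model.ipynb',          # placeholder: the .ipynb paired with this .py file
#     'research/model.output.ipynb',   # executed copy with outputs
#     parameters={
#         'mlflow_do_log': True,
#         'mlflow_tracking_server_uri': 'http://localhost:5000',
#     },
# )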

# %%
from collections.abc import Sequence
import os
import pathlib
import pickle
import sys

# %%
import matplotlib
import mlflow
import mlflow.models
import mlflow.sklearn
import mlxtend.feature_selection
import mlxtend.plotting
import sklearn.compose
import sklearn.ensemble
import sklearn.metrics
import sklearn.model_selection
import sklearn.pipeline
import sklearn.preprocessing

# %%
BASE_PATH = pathlib.Path('..')

# %%
CODE_PATH = BASE_PATH
sys.path.insert(0, str(CODE_PATH.resolve()))

# %%
from iis_project.mlxtend_utils.feature_selection import SEQUENTIAL_FEATURE_SELECTOR_PARAMS_COMMON_INCLUDE
from iis_project.sklearn_utils import filter_params
from iis_project.sklearn_utils.compose import COLUMN_TRANSFORMER_PARAMS_COMMON_INCLUDE
from iis_project.sklearn_utils.ensemble import RANDOM_FOREST_REGRESSOR_PARAMS_COMMON_EXCLUDE
from iis_project.sklearn_utils.pandas import pandas_dataframe_from_transformed_artifacts
from iis_project.sklearn_utils.preprocessing import STANDARD_SCALER_PARAMS_COMMON_EXCLUDE

# %%
MODEL_INOUT_EXAMPLE_SIZE = 0x10

# %%
mlflow.set_tracking_uri(mlflow_tracking_server_uri)
if mlflow_registry_uri is not None:
    mlflow.set_registry_uri(mlflow_registry_uri)

# %%
if mlflow_do_log:
    mlflow_experiment = mlflow.set_experiment(experiment_name=mlflow_experiment_name, experiment_id=mlflow_experiment_id)

# %%
DATA_PATH = (
    pathlib.Path(os.path.dirname(data_aug_pickle_path))
    if data_aug_pickle_path is not None
    else (BASE_PATH / 'data')
)


# %%
def build_sequential_feature_selector(*args, **kwargs):
    return mlxtend.feature_selection.SequentialFeatureSelector(*args, **kwargs)

def plot_sequential_feature_selection(feature_selector, *args_rest, **kwargs):
    metric_dict = feature_selector.get_metric_dict()
    return mlxtend.plotting.plot_sequential_feature_selection(metric_dict, *args_rest, **kwargs)


# %% [markdown]
# ## Data loading and overview

# %%
with open(
    (
        data_aug_pickle_path
        if data_aug_pickle_path is not None
        else (DATA_PATH / data_aug_pickle_relpath)
    ),
    'rb',
) as input_file:
    df_orig = pickle.load(input_file)

# %% [markdown]
# Dataset overview:

# %%
len(df_orig)

# %%
df_orig.info()

# %%
df_orig.head(0x10)

# %% [markdown]
# ## Splitting the dataset

# %% [markdown]
# Selecting the features and the target variables:

# %%
feature_columns = (
    'selling_price',
    'driven_kms',
    'fuel_type',
    'selling_type',
    'transmission',
    #'owner',
    'age',
)

target_columns = (
    'present_price',
)

# %%
features_to_scale_to_standard_columns = (
    'selling_price',
    'driven_kms',
    'age',
)
assert all(
    (col in df_orig.select_dtypes(('number',)).columns)
    for col in features_to_scale_to_standard_columns
)

features_to_encode_wrt_target_columns = (
    'fuel_type',
    'selling_type',
    'transmission',
    #'owner',
)
assert all(
    (col in df_orig.select_dtypes(('category', 'object')).columns)
    for col in features_to_encode_wrt_target_columns
)

# %%
df_orig_features = df_orig[list(feature_columns)]
df_target = df_orig[list(target_columns)]

# %% [markdown]
# Splitting into training and test sets:

# %%
DF_TEST_PORTION = 0.25

# %%
df_orig_features_train, df_orig_features_test, df_target_train, df_target_test = (
    sklearn.model_selection.train_test_split(
        df_orig_features, df_target, test_size=DF_TEST_PORTION, random_state=0x7AE6,
    )
)

# %% [markdown]
# Sizes of the training and test sets, respectively:

# %%
tuple(map(len, (df_target_train, df_target_test)))

# %% [markdown]
# ## Models

# %%
#MODEL_PIP_REQUIREMENTS_PATH = BASE_PATH / 'requirements' / 'requirements-isolated-research-model.txt'

# %% [markdown]
# Model signature for MLFlow:

# %%
mlflow_model_signature = mlflow.models.infer_signature(model_input=df_orig_features, model_output=df_target)
mlflow_model_signature


# %% [raw] vscode={"languageId": "raw"}
# input_schema = mlflow.types.schema.Schema([
#     mlflow.types.schema.ColSpec("double", "selling_price"),
#     mlflow.types.schema.ColSpec("double", "driven_kms"),
#     mlflow.types.schema.ColSpec("string", "fuel_type"),
#     mlflow.types.schema.ColSpec("string", "selling_type"),
#     mlflow.types.schema.ColSpec("string", "transmission"),
#     mlflow.types.schema.ColSpec("double", "age"),
# ])
#
# output_schema = mlflow.types.schema.Schema([
#     mlflow.types.schema.ColSpec("double", "present_price"),
# ])
#
# mlflow_model_signature = mlflow.models.ModelSignature(inputs=input_schema, outputs=output_schema)

# %%
def build_features_scaler_standard():
    return sklearn.preprocessing.StandardScaler()


# %%
#def build_categorical_features_encoder_onehot():
#    return sklearn.preprocessing.OneHotEncoder()

def build_categorical_features_encoder_target(*, random_state=None):
    return sklearn.preprocessing.TargetEncoder(
        target_type='continuous', smooth='auto', shuffle=True, random_state=random_state,
    )


# %% [markdown]
# The regressor is a small random forest; the objective is to minimize the squared prediction error:

# %%
def build_regressor(*, random_state=None):
    return sklearn.ensemble.RandomForestRegressor(
        10, criterion='squared_error',
        max_depth=8, max_features='sqrt',
        random_state=random_state,
    )


# %%
def score_predictions(target_test, target_test_predicted):
    return {
        'mse': sklearn.metrics.mean_squared_error(target_test, target_test_predicted),
        'mae': sklearn.metrics.mean_absolute_error(target_test, target_test_predicted),
        'mape': sklearn.metrics.mean_absolute_percentage_error(target_test, target_test_predicted),
    }


# %%
# uses the global variables mlflow_do_log and mlflow_experiment
def mlflow_log_model(
    model,
    model_params,
    metrics,
    *,
    run_name,
    model_signature=None,
    input_example=None,
    #pip_requirements=None,
    comment_file_path=None,
):
    if not mlflow_do_log:
        return
    with mlflow.start_run(experiment_id=mlflow_experiment.experiment_id, run_name=run_name):
        _ = mlflow.sklearn.log_model(
            model,
            'model',
            signature=model_signature,
            input_example=input_example,
            #pip_requirements=pip_requirements,
        )
        if model_params is not None:
            _ = mlflow.log_params(model_params)
        if metrics is not None:
            _ = mlflow.log_metrics(metrics)
        if (comment_file_path is not None) and pathlib.Path(comment_file_path).exists():
            mlflow.log_artifact(str(comment_file_path))


# %% [markdown]
# ### Baseline model

# %% [markdown]
# Feature preprocessing pipeline:

# %%
preprocess_transformer = sklearn.compose.ColumnTransformer(
    [
        ('scale_to_standard', build_features_scaler_standard(), features_to_scale_to_standard_columns),
        (
            #'encode_categoricals_one_hot',
            'encode_categoricals_wrt_target',
            #build_categorical_features_encoder_onehot(),
            build_categorical_features_encoder_target(random_state=0x2ED6),
            features_to_encode_wrt_target_columns,
        ),
    ],
    remainder='drop',
)

# %%
regressor = build_regressor(random_state=0x016B)
regressor

# %% [markdown]
# Composite pipeline:

# %%
pipeline = sklearn.pipeline.Pipeline([
    ('preprocess', preprocess_transformer),
    ('regress', regressor),
])
pipeline

# %%
model_params = filter_params(
    pipeline.get_params(),
    include={
        'preprocess': (
            False,
            {
                **{k: True for k in COLUMN_TRANSFORMER_PARAMS_COMMON_INCLUDE},
                'scale_to_standard': True,
                'encode_categoricals_wrt_target': True,
            },
        ),
        'regress': (False, True),
    },
    exclude={
        'preprocess': {'scale_to_standard': STANDARD_SCALER_PARAMS_COMMON_EXCLUDE},
        'regress': RANDOM_FOREST_REGRESSOR_PARAMS_COMMON_EXCLUDE,
    },
)
model_params

# %% [markdown]
# Model training:

# %%
_ = pipeline.fit(df_orig_features_train, df_target_train.iloc[:, 0])

# %% [markdown]
# Quality evaluation:

# %%
target_test_predicted = pipeline.predict(df_orig_features_test)

# %% [markdown]
# Quality metrics (MAPE, as well as MSE and MAE):

# %%
metrics = score_predictions(df_target_test, target_test_predicted)
metrics

# %%
mlflow_log_model(
    pipeline,
    model_params=model_params,
    metrics={k: float(v) for k, v in metrics.items()},
    run_name=mlflow_baseline_run_name,
    model_signature=mlflow_model_signature,
    input_example=df_orig_features.head(MODEL_INOUT_EXAMPLE_SIZE),
    #pip_requirements=str(MODEL_PIP_REQUIREMENTS_PATH),
    comment_file_path=(
        model_comment_path
        if model_comment_path is not None
        else (BASE_PATH / 'research' / model_comment_relpath)
    ),
)

# %% [markdown]
# ### Model with engineered features

# %% [markdown]
# Feature preprocessing pipeline:

# %%
features_to_extend_as_polynomial = ('selling_price', 'driven_kms')
features_to_extend_as_spline = ('age',)


# %%
def build_preprocess_transformer():
    assert set(features_to_extend_as_polynomial) <= {*features_to_scale_to_standard_columns}
    assert set(features_to_extend_as_spline) <= {*features_to_scale_to_standard_columns}
    return sklearn.compose.ColumnTransformer(
        [
            (
                'extend_features_as_polynomial',
                sklearn.pipeline.Pipeline([
                    (
                        'extend_features',
                        sklearn.preprocessing.PolynomialFeatures(2, include_bias=False),
                    ),
                    ('scale_to_standard', build_features_scaler_standard()),
                ]),
                features_to_extend_as_polynomial,
            ),
            (
                'extend_features_as_spline',
                sklearn.preprocessing.SplineTransformer(
                    4, knots='quantile', extrapolation='constant', include_bias=False,
                ),
                features_to_extend_as_spline,
            ),
            (
                'scale_to_standard',
                build_features_scaler_standard(),
                tuple(filter(lambda f: f not in features_to_extend_as_polynomial, features_to_scale_to_standard_columns)),
            ),
            (
                'encode_categoricals_wrt_target',
                build_categorical_features_encoder_target(random_state=0x2ED6),
                features_to_encode_wrt_target_columns,
            ),
        ],
        remainder='drop',
    )

preprocess_transformer = build_preprocess_transformer()
preprocess_transformer

# %% [markdown]
# Demonstration of the data preprocessing:

# %%
preprocess_transformer_tmp = build_preprocess_transformer()
df_augd_features_matrix_train = preprocess_transformer_tmp.fit_transform(df_orig_features_train, df_target_train.iloc[:, 0])
df_augd_features_train = pandas_dataframe_from_transformed_artifacts(df_augd_features_matrix_train, preprocess_transformer_tmp)
del preprocess_transformer_tmp
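
# %% [markdown]
# For illustration only: a rough, non-executed sketch of what such a conversion can look like with plain pandas and the fitted transformer's `get_feature_names_out()`. The names `fitted_transformer`, `matrix` and `df_input` are placeholders, and the project helper `pandas_dataframe_from_transformed_artifacts` may differ in its details:

# %% [raw] vscode={"languageId": "raw"}
# import pandas as pd
#
# # Rebuild a labelled DataFrame from the transformed matrix, reusing the
# # generated output column names and the original row index.
# pd.DataFrame(
#     matrix,
#     columns=fitted_transformer.get_feature_names_out(),
#     index=df_input.index,
# )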

# %% [markdown]
# Overview of the preprocessed dataset:

# %%
df_augd_features_train.info()

# %%
df_augd_features_train.head(0x8)

# %%
regressor = build_regressor(random_state=0x3AEF)
regressor

# %% [markdown]
# Composite pipeline:

# %%
pipeline = sklearn.pipeline.Pipeline([
    ('preprocess', preprocess_transformer),
    ('regress', regressor),
])
pipeline

# %%
model_params = filter_params(
    pipeline.get_params(),
    include={
        'preprocess': (
            False,
            {
                **{k: True for k in COLUMN_TRANSFORMER_PARAMS_COMMON_INCLUDE},
                'extend_features_as_polynomial': {
                    'extend_features': True,
                    'scale_to_standard': True,
                },
                'extend_features_as_spline': True,
                'scale_to_standard': True,
                'encode_categoricals_wrt_target': True,
            },
        ),
        'regress': (False, True),
    },
    exclude={
        'preprocess': {
            'extend_features_as_polynomial': {
                'scale_to_standard': STANDARD_SCALER_PARAMS_COMMON_EXCLUDE,
            },
            'scale_to_standard': STANDARD_SCALER_PARAMS_COMMON_EXCLUDE,
        },
        'regress': RANDOM_FOREST_REGRESSOR_PARAMS_COMMON_EXCLUDE,
    },
)
model_params

# %% [markdown]
# Model training:

# %%
_ = pipeline.fit(df_orig_features_train, df_target_train.iloc[:, 0])

# %% [markdown]
# Quality evaluation:

# %%
target_test_predicted = pipeline.predict(df_orig_features_test)

# %% [markdown]
# Quality metrics (MAPE, as well as MSE and MAE):

# %%
metrics = score_predictions(df_target_test, target_test_predicted)
metrics

# %%
mlflow_log_model(
    pipeline,
    model_params=model_params,
    metrics={k: float(v) for k, v in metrics.items()},
    run_name=mlflow_feateng_run_name,
    model_signature=mlflow_model_signature,
    input_example=df_orig_features.head(MODEL_INOUT_EXAMPLE_SIZE),
    #pip_requirements=str(MODEL_PIP_REQUIREMENTS_PATH),
    comment_file_path=(
        model_comment_path
        if model_comment_path is not None
        else (BASE_PATH / 'research' / model_comment_relpath)
    ),
)

# %% [markdown]
# ### Model with engineered and filtered features

# %%
regressor = build_regressor(random_state=0x8EDD)
regressor

# %% [markdown]
# Feature selection over the augmented feature set by minimizing MAPE:

# %%
len(df_augd_features_train.columns)

# %%
FILTERED_FEATURES_NUM = (4, 8)

# %%
feature_selector = build_sequential_feature_selector(
    regressor, k_features=FILTERED_FEATURES_NUM, forward=True, floating=True, cv=4, scoring='neg_mean_absolute_percentage_error',
    verbose=1,
)
feature_selector

# %%
_ = feature_selector.fit(df_augd_features_train, df_target_train.iloc[:, 0])

# %% [markdown]
# Names of the selected features:

# %%
feature_selector.k_feature_names_
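
# %% [markdown]
# The selector also exposes the cross-validated score of the selected subset (negative MAPE, given the scoring above); kept here as a non-executed sketch:

# %% [raw] vscode={"languageId": "raw"}
# # Best cross-validation score of the selected feature subset
# # (negative, since scoring='neg_mean_absolute_percentage_error').
# feature_selector.k_score_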

# %% [markdown]
# MAPE versus the number of selected features (the selection region bounded by `FILTERED_FEATURES_NUM` is highlighted):

# %%
fig, ax = plot_sequential_feature_selection(feature_selector, kind='std_dev')
ax.grid(True)
if isinstance(FILTERED_FEATURES_NUM, Sequence):
    _ = ax.axvspan(min(FILTERED_FEATURES_NUM), max(FILTERED_FEATURES_NUM), color=matplotlib.colormaps.get_cmap('tab10')(6), alpha=0.15)
# it would be nice to set the upper x limit to `len(df_augd_features_train.columns)`, but SequentialFeatureSelector does not evaluate subsets that large
_ = ax.set_xlim((1, (max(FILTERED_FEATURES_NUM) if isinstance(FILTERED_FEATURES_NUM, Sequence) else FILTERED_FEATURES_NUM)))
_ = ax.set_ylim((None, 0.))

# %% [markdown]
# Composite pipeline:

# %%
pipeline = sklearn.pipeline.Pipeline([
    ('preprocess', preprocess_transformer),
    ('select_features', feature_selector),
    ('regress', regressor),
])
pipeline

# %%
model_params = filter_params(
    pipeline.get_params(),
    include={
        'preprocess': (
            False,
            {
                **{k: True for k in COLUMN_TRANSFORMER_PARAMS_COMMON_INCLUDE},
                'extend_features_as_polynomial': {
                    'extend_features': True,
                    'scale_to_standard': True,
                },
                'extend_features_as_spline': True,
                'scale_to_standard': True,
                'encode_categoricals_wrt_target': True,
            },
        ),
        'select_features': (
            False,
            {
                **{k: True for k in SEQUENTIAL_FEATURE_SELECTOR_PARAMS_COMMON_INCLUDE},
                'estimator': False,
            },
        ),
        'regress': (False, True),
    },
    exclude={
        'preprocess': {
            'extend_features_as_polynomial': {
                'scale_to_standard': STANDARD_SCALER_PARAMS_COMMON_EXCLUDE,
            },
            'scale_to_standard': STANDARD_SCALER_PARAMS_COMMON_EXCLUDE,
        },
        'select_features': (),  # TODO: not great
        'regress': RANDOM_FOREST_REGRESSOR_PARAMS_COMMON_EXCLUDE,
    },
)
model_params

# %% [markdown]
# Model training:

# %%
# XXX: SequentialFeatureSelector gets fitted again!?
_ = pipeline.fit(df_orig_features_train, df_target_train.iloc[:, 0])

# %% [markdown]
# Quality evaluation:

# %%
target_test_predicted = pipeline.predict(df_orig_features_test)

# %% [markdown]
# Quality metrics (MAPE, as well as MSE and MAE):

# %%
metrics = score_predictions(df_target_test, target_test_predicted)
metrics

# %%
mlflow_log_model(
    pipeline,
    model_params=model_params,
    metrics={k: float(v) for k, v in metrics.items()},
    run_name=mlflow_feateng_filtered_run_name,
    model_signature=mlflow_model_signature,
    input_example=df_orig_features.head(MODEL_INOUT_EXAMPLE_SIZE),
    #pip_requirements=str(MODEL_PIP_REQUIREMENTS_PATH),
    comment_file_path=(
        model_comment_path
        if model_comment_path is not None
        else (BASE_PATH / 'research' / model_comment_relpath)
    ),
)
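
# %% [markdown]
# For reference, a minimal sketch of loading a logged model back from MLFlow for inference; `<run_id>` is a placeholder for an actual run ID from the tracking server:

# %% [raw] vscode={"languageId": "raw"}
# # Load a previously logged pipeline and predict with it.
# loaded_model = mlflow.sklearn.load_model('runs:/<run_id>/model')
# loaded_model.predict(df_orig_features_test.head(MODEL_INOUT_EXAMPLE_SIZE))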

# %%