Coverage for addmo/s3_model_tuning/config/model_tuning_config.py: 100%
29 statements
« prev ^ index » next coverage.py v7.4.4, created at 2025-08-31 13:05 +0000
1import os
2from pydantic import BaseModel, Field, PrivateAttr
3from typing import Optional
4from addmo.util.load_save_utils import root_dir
class ModelTunerConfig(BaseModel):
    """Configuration of the model tuning process.

    Bundles the candidate models, the hyperparameter tuning strategy and the
    validation-score setup (mechanism, splitting, metric) in one place.
    """

    # Candidate model identifiers to tune.
    models: list[str] = Field(
        ["ScikitMLP_TargetTransformed"],
        description="List of models to use (available models: ScikitMLP, ScikitLinearReg, ScikitLinearRegNoScaler, ScikitSVR, ScikitMLP_TargetTransformed, SciKerasSequential)",
    )

    # Repeated trainings guard against a single run getting stuck in a bad
    # local minimum; the best of the repeats is kept.
    trainings_per_model: int = Field(
        1,
        description="Number of trainings per model to choose the best "
        "from, this is done to avoid local minima "  # fixed missing space before "during"
        "during training.",
    )

    hyperparameter_tuning_type: str = Field(
        "OptunaTuner",
        description="Type of hyperparameter tuning, e.g., OptunaTuner, GridSearchTuner",
    )
    # default_factory avoids sharing one mutable dict across instances.
    hyperparameter_tuning_kwargs: Optional[dict[str, int]] = Field(
        default_factory=lambda: {"n_trials": 2},
        description="Kwargs for the tuner. Set default value to n_trials: 2",
    )

    validation_score_mechanism: str = Field(
        "cv", description="Validation score mechanism, e.g., cross validation, holdout"
    )
    validation_score_mechanism_kwargs: Optional[dict[str, str]] = Field(
        default=None, description="Kwargs for the validation score mechanism"
    )

    validation_score_splitting: str = Field(
        "KFold", description="Validation score splitting, e.g., KFold, PredefinedSplit"
    )
    validation_score_splitting_kwargs: Optional[dict[str, str]] = Field(
        default=None, description="Kwargs for the validation score splitter"
    )

    validation_score_metric: str = Field(
        "neg_root_mean_squared_error",
        description="Validation score metric, e.g., r2, neg_mean_absolute_error",
    )
    validation_score_metric_kwargs: Optional[dict[str, str]] = Field(
        default=None, description="Kwargs for the validation score metric"
    )
class ModelTuningExperimentConfig(BaseModel):
    """Configuration of a single model tuning experiment.

    Identifies the raw data / experiment names, the data file, the target
    variable and the train/validation/test time ranges. A per-experiment
    ``ModelTunerConfig`` is held as a private attribute and created
    automatically for every instance.
    """

    name_of_raw_data: str = Field(
        "test_raw_data", description="Refer to the raw system_data connected to this"
    )
    name_of_data_tuning_experiment: str = Field(
        "test_data_tuning",
        description="Refer to the system_data tuning experiment aka the input system_data for this model tuning experiment",
    )
    name_of_model_tuning_experiment: str = Field(
        "test_model_tuning", description="Set name of the current experiment"
    )
    # NOTE(review): default path is evaluated once at class-definition time
    # (root_dir() is called on import) — presumably intentional for a static
    # project layout; confirm if root_dir can change at runtime.
    abs_path_to_data: str = Field(
        os.path.join(root_dir(), 'addmo_examples', 'raw_input_data', 'InputData.xlsx'),
        description="Path to the file that has the system_data",
    )
    name_of_target: str = Field(
        "FreshAir Temperature", description="Name of the target variable"
    )

    # Model Tuning Variables
    start_train_val: str = Field(
        "2016-08-01 00:00",
        description="Start date and time for training and validation",
    )
    stop_train_val: str = Field(
        "2016-08-14 23:45", description="Stop date and time for training and validation"
    )
    start_test: str = Field(
        "2016-08-15 00:00", description="Start date and time for testing"
    )
    end_test: str = Field(
        "2016-08-16 23:45", description="End date and time for testing"
    )

    # Private per-experiment tuner config. default_factory replaces the
    # former __init__ override that manually assigned the attribute after
    # super().__init__() — same result, without bypassing pydantic's
    # initialization machinery.
    _config_model_tuner: Optional[ModelTunerConfig] = PrivateAttr(
        default_factory=ModelTunerConfig
    )