Coverage for tests/E2E_test.py: 98%

43 statements  

coverage.py v7.4.4, created at 2025-08-31 13:05 +0000

import unittest
import os
import shutil
import pandas as pd
from pathlib import Path
from addmo.util.load_save import load_config_from_json, load_data
from addmo.util.definitions import results_dir_data_tuning, results_dir_model_tuning, return_best_model
from addmo_examples.executables.exe_data_tuning_fixed import default_config_exe_data_tuning_fixed
from addmo_examples.executables.exe_model_tuning import exe_model_tuning
from addmo_examples.executables.exe_data_insights import exe_carpet_plots
from addmo.s2_data_tuning.config.data_tuning_config import DataTuningFixedConfig
from addmo.s3_model_tuning.config.model_tuning_config import ModelTuningExperimentConfig, ModelTunerConfig
from addmo.util.load_save_utils import root_dir

# This unittest checks that all the executables work compatibly with each other
# in a complete pipeline. If new configs are added, the user has to create
# executables that test the data tuning produced from those configs; the model
# tuning and insights are then handled here. Likewise, newly added models can be
# tested against the existing tuned data and insights.

# This is a manual end-to-end test; make sure to define the paths to new configs
# in order to test the full pipeline.
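
# A minimal sketch of how a new data tuning config could be wired in, assuming
# the load_config_from_json(path, config_class) pattern used below also applies
# to data tuning configs. The JSON path is hypothetical; only
# load_config_from_json, root_dir, and DataTuningFixedConfig are real names
# from this repository:
#
#     new_data_config = load_config_from_json(
#         os.path.join(root_dir(), "path", "to", "new_data_tuning_config.json"),
#         DataTuningFixedConfig,
#     )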

class TestAddmoEndToEnd(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        """
        Test the functionality of addmo_examples in a complete pipeline.
        """
        # Define the data tuning and model tuning setup
        cls.data_config = DataTuningFixedConfig()
        cls.model_tuner_config_path = os.path.join(
            root_dir(), "addmo", "s3_model_tuning", "config", "model_tuner_config.json"
        )
        cls.model_exp_config_path = os.path.join(
            root_dir(), "addmo", "s3_model_tuning", "config", "model_tuner_experiment_config.json"
        )
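
        # Added sanity check (not part of the original test): fail fast with a
        # clear message if the expected config JSONs are missing before loading.
        for path in (cls.model_tuner_config_path, cls.model_exp_config_path):
            assert os.path.exists(path), f"Config file not found: {path}"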

        cls.model_config = load_config_from_json(cls.model_exp_config_path, ModelTuningExperimentConfig)
        cls.model_tuner_config = load_config_from_json(cls.model_tuner_config_path, ModelTunerConfig)

    def test_full_pipeline(self):
        # Test the data tuning executable file
        default_config_exe_data_tuning_fixed()
        data_dir = results_dir_data_tuning(self.data_config)
        tuned_csv = os.path.join(data_dir, "tuned_xy_fixed.csv")
        self.assertTrue(os.path.exists(tuned_csv), "Tuned data file missing.")
        xy_tuned = load_data(tuned_csv)
        self.assertFalse(xy_tuned.empty, "Tuned xy data is empty.")
        self.assertIsInstance(xy_tuned, pd.DataFrame, "Tuned xy data is not a DataFrame.")

        # Use the tuned data as the input data for model tuning
        self.model_config.abs_path_to_data = tuned_csv
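
        # Added illustration (not in the original test): the end-to-end chaining
        # hinges on the model config now pointing at the tuned CSV.
        self.assertEqual(self.model_config.abs_path_to_data, tuned_csv)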

        # Test the model tuning executable file
        exe_model_tuning(config_exp=self.model_config, config_tuner=self.model_tuner_config)
        model_dir = results_dir_model_tuning(self.model_config)
        best_model_path = return_best_model(model_dir)
        self.assertTrue(os.path.exists(best_model_path), "Best model not saved.")

        # Use the results to test the plotting functionality
        exe_carpet_plots(
            dir=model_dir,
            plot_name="carpet_plot",
            plot_dir=model_dir,
            path_to_regressor=best_model_path,
            save=True,
        )
        carpet_plot_path = os.path.join(model_dir, "carpet_plot.pdf")
        self.assertTrue(os.path.exists(carpet_plot_path), "Carpet plot not generated.")
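
        # Added check (not in the original test): the generated PDF should be
        # non-empty on disk, not merely present; os.path.getsize is stdlib.
        self.assertGreater(os.path.getsize(carpet_plot_path), 0, "Carpet plot file is empty.")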

    @classmethod
    def tearDownClass(cls):
        # Clean up the results generated by the pipeline run
        shutil.rmtree(results_dir_data_tuning(cls.data_config), ignore_errors=True)
        shutil.rmtree(results_dir_model_tuning(cls.model_config), ignore_errors=True)


if __name__ == "__main__":
    unittest.main()