diff --git a/test/unit/data/test_data_split.py b/test/unit/data/test_data_split.py
index 8415ecfcbf..b4f2b85cc7 100644
--- a/test/unit/data/test_data_split.py
+++ b/test/unit/data/test_data_split.py
@@ -286,7 +286,7 @@ def test_data_splitting_defines_validation_blocks_correctly(forecast_length, cv_
                                                             check_cv_folds, check_split_ratio, check_validation_blocks):
     """ Checks if validation blocks count defines correctly for different data """
-    data = get_ts_data_to_forecast(forecast_length)
+    data = get_ts_data_to_forecast(forecast_length, 120)
     data_source_splitter = DataSourceSplitter(cv_folds=cv_folds, split_ratio=split_ratio)
     data_source_splitter.build(data)
     assert data_source_splitter.cv_folds == check_cv_folds
diff --git a/test/unit/optimizer/test_pipeline_objective_eval.py b/test/unit/optimizer/test_pipeline_objective_eval.py
index 15be120e24..1c1ce688e7 100644
--- a/test/unit/optimizer/test_pipeline_objective_eval.py
+++ b/test/unit/optimizer/test_pipeline_objective_eval.py
@@ -90,8 +90,8 @@ def test_pipeline_objective_evaluate_with_different_metrics(classification_datas
 )
 def test_pipeline_objective_evaluate_with_different_metrics_with_str_labes(pipeline):
     for metric in ClassificationMetricsEnum:
-        one_fold_split = OneFoldInputDataSplit()
-        data_split = partial(one_fold_split.input_split, input_data=classification_dataset_with_str_labels())
+        data_splitter = DataSourceSplitter()
+        data_split = data_splitter.build(classification_dataset_with_str_labels())
         check_pipeline = deepcopy(pipeline)
         objective_eval = PipelineObjectiveEvaluate(MetricsObjective(metric), data_split)
         fitness = objective_eval(pipeline)