Skip to content

Commit

Permalink
Merge pull request #261 from hackingmaterials/tpot_downgrade
Browse files Browse the repository at this point in the history
Tpot downgrade
  • Loading branch information
ardunn authored Nov 8, 2019
2 parents 39c267f + 26bd6c7 commit 0a5d0dd
Show file tree
Hide file tree
Showing 4 changed files with 42 additions and 37 deletions.
12 changes: 6 additions & 6 deletions automatminer/automl/adaptors.py
Original file line number Diff line number Diff line change
Expand Up @@ -69,8 +69,8 @@ def __init__(self, **tpot_kwargs):
tpot_kwargs["cv"] = tpot_kwargs.get("cv", 5)
tpot_kwargs["n_jobs"] = tpot_kwargs.get("n_jobs", -1)
tpot_kwargs["verbosity"] = tpot_kwargs.get("verbosity", 3)
tpot_kwargs["memory"] = tpot_kwargs.get("memory", "auto")
tpot_kwargs["use_dask"] = True
# tpot_kwargs["memory"] = tpot_kwargs.get("memory", "auto")
# tpot_kwargs["use_dask"] = True

self.mode = None
self.tpot_kwargs = tpot_kwargs
Expand Down Expand Up @@ -109,10 +109,10 @@ def fit(self, df, target, **fit_kwargs):
# Determine learning type based on whether classification or regression
self.mode = regression_or_classification(df[target])

mltype_str = "Classifier" if self.mode == AMM_CLF_NAME else "Regressor"
self.tpot_kwargs["template"] = self.tpot_kwargs.get(
"template", "Selector-Transformer-{}".format(mltype_str)
)
# mltype_str = "Classifier" if self.mode == AMM_CLF_NAME else "Regressor"
# self.tpot_kwargs["template"] = self.tpot_kwargs.get(
# "template", "Selector-Transformer-{}".format(mltype_str)
# )

if self.mode == AMM_CLF_NAME:
self.tpot_kwargs["config_dict"] = self.tpot_kwargs.get(
Expand Down
32 changes: 16 additions & 16 deletions automatminer/automl/config/tpot_configs.py
Original file line number Diff line number Diff line change
Expand Up @@ -80,14 +80,14 @@
"bootstrap": [True, False],
},
"sklearn.linear_model.RidgeCV": {},
"xgboost.XGBRegressor": {
"n_estimators": tree_estimators,
"max_depth": tree_max_depths,
"learning_rate": tree_learning_rates,
"subsample": tree_ensemble_subsample,
"min_child_weight": range(1, 21, 4),
"nthread": [1],
},
# "xgboost.XGBRegressor": {
# "n_estimators": tree_estimators,
# "max_depth": tree_max_depths,
# "learning_rate": tree_learning_rates,
# "subsample": tree_ensemble_subsample,
# "min_child_weight": range(1, 21, 4),
# "nthread": [1],
# },
# Preprocesssors
"sklearn.preprocessing.Binarizer": {"threshold": np.arange(0.0, 1.01, 0.05)},
"sklearn.decomposition.FastICA": {"tol": np.arange(0.0, 1.01, 0.05)},
Expand Down Expand Up @@ -214,14 +214,14 @@
"C": [1e-4, 1e-3, 1e-2, 1e-1, 0.5, 1.0, 5.0, 10.0, 15.0, 20.0, 25.0],
"dual": [True, False],
},
"xgboost.XGBClassifier": {
"n_estimators": tree_estimators,
"max_depth": tree_max_depths,
"learning_rate": tree_learning_rates,
"subsample": tree_ensemble_subsample,
"min_child_weight": range(1, 21),
"nthread": [1],
},
# "xgboost.XGBClassifier": {
# "n_estimators": tree_estimators,
# "max_depth": tree_max_depths,
# "learning_rate": tree_learning_rates,
# "subsample": tree_ensemble_subsample,
# "min_child_weight": range(1, 21),
# "nthread": [1],
# },
# Preprocesssors
"sklearn.preprocessing.Binarizer": {"threshold": np.arange(0.0, 1.01, 0.05)},
"sklearn.decomposition.FastICA": {"tol": np.arange(0.0, 1.01, 0.05)},
Expand Down
33 changes: 19 additions & 14 deletions automatminer_dev/submit.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@

if __name__ == "__main__":

N_JOBS = 10
N_JOBS = 2

pipe_config = {
"learner_name": "TPOTAdaptor",
Expand All @@ -25,7 +25,8 @@
"max_time_mins": 1440,
"max_eval_time_mins": 20,
"population_size": 200,
"memory": "auto",
#todo: change this back
# "memory": "auto",
"n_jobs": N_JOBS,
},
# "reducer_kwargs": {"reducers": ("corr",)},
Expand All @@ -49,7 +50,7 @@
}

pipe_config_debug = {
"autofeaturizer_kwargs": {"preset": "debug", "n_jobs": 10},
"autofeaturizer_kwargs": {"preset": "debug", "n_jobs": N_JOBS},
"reducer_kwargs": {"reducers": ()},
"learner_name": "rf",
"learner_kwargs": {"n_estimators": 500},
Expand Down Expand Up @@ -77,18 +78,22 @@
"debug"
]

# from automatminer_dev.config import MP_E_FORM, JDFT2D, GLASS
# wf = wf_benchmark("lrc", pipe_config, **GFA, cache=True, tags=tags)
from automatminer_dev.config import EXPT_IS_METAL, EXPT_GAP
worker = "lrc"
# wf = wf_benchmark(worker, pipe_config, **EXPT_IS_METAL, cache=True, tags=tags)
# wf = wf_benchmark(worker, pipe_config, **EXPT_GAP, cache=True, tags=tags)
# wf = wf_benchmark(worker, pipe_config_debug, **EXPT_IS_METAL, cache=True, tags=tags)
wf = wf_benchmark(worker, pipe_config_debug, **EXPT_GAP, cache=True, tags=tags)

wf = wf_evaluate_build(
"cori",
"24 hr tpot express 99% reducing with all mean cleaning samples",
BENCHMARK_FULL_SET,
pipe_config,
include_tests=False,
cache=True,
tags=tags,
)
# wf = wf_evaluate_build(
# "cori",
# "24 hr tpot express 99% reducing with all mean cleaning samples",
# BENCHMARK_FULL_SET,
# pipe_config,
# include_tests=False,
# cache=True,
# tags=tags,
# )

# wf = wf_run_test("local", "initial_test")

Expand Down
2 changes: 1 addition & 1 deletion requirements.txt
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
# Direct requirements of this project alone
matminer==0.6.2
xgboost==0.80
tpot==0.11.0
tpot==0.9
skrebate==0.6
pyyaml==5.1.2

Expand Down

0 comments on commit 0a5d0dd

Please sign in to comment.