From f48ca2618fed2193ffa929af7a44d3031dfb16dd Mon Sep 17 00:00:00 2001
From: Chi Wang
Date: Fri, 8 Oct 2021 16:09:43 -0700
Subject: [PATCH] warning -> info for low cost partial config (#231)
* warning -> info for low cost partial config
#195, #110
* when n_estimators < 0, use trained_estimator's
* log debug info
* test random seed
* remove "objective"; avoid ZeroDivisionError
* hp config to estimator params
* check type of searcher
* default n_jobs
* try import
* Update searchalgo_auto.py
* CLASSIFICATION
* auto_augment flag
* min_sample_size
* make catboost optional
---
README.md | 4 +-
flaml/automl.py | 66 ++-
flaml/data.py | 11 +-
flaml/ml.py | 4 +-
flaml/model.py | 349 +++++--------
flaml/nlp/hpo/searchalgo_auto.py | 128 +++--
flaml/onlineml/trial_runner.py | 325 +++++++-----
flaml/searcher/blendsearch.py | 38 +-
flaml/searcher/flow2.py | 256 +++++----
flaml/searcher/online_searcher.py | 264 ++++++----
flaml/searcher/search_thread.py | 2 +-
flaml/tune/README.md | 62 ++-
flaml/tune/trial_runner.py | 38 +-
flaml/tune/tune.py | 43 +-
notebook/flaml_automl.ipynb | 825 +++++++++++++++---------------
notebook/flaml_lightgbm.ipynb | 606 ++++++++++------------
notebook/flaml_xgboost.ipynb | 582 ++++++++++-----------
setup.py | 4 +-
test/test_automl.py | 105 ++--
test/test_python_log.py | 54 +-
test/test_training_log.py | 18 +-
test/tune/test_searcher.py | 13 +-
22 files changed, 1938 insertions(+), 1859 deletions(-)
diff --git a/README.md b/README.md
index 5937a76bb..45e3a7145 100644
--- a/README.md
+++ b/README.md
@@ -103,7 +103,7 @@ print(automl.model)
```python
from flaml import AutoML
-from sklearn.datasets import load_boston
+from sklearn.datasets import fetch_california_housing
# Initialize an AutoML instance
automl = AutoML()
# Specify automl goal and constraint
@@ -113,7 +113,7 @@ automl_settings = {
"task": 'regression',
"log_file_name": "test/boston.log",
}
-X_train, y_train = load_boston(return_X_y=True)
+X_train, y_train = fetch_california_housing(return_X_y=True)
# Train with labeled input data
automl.fit(X_train=X_train, y_train=y_train,
**automl_settings)
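For orientation, a runnable version of the updated README quickstart, assembled from the hunks above; `fetch_california_housing` requires scikit-learn >= 0.23, and the `time_budget`/`metric` values are illustrative placeholders (the patch context elides them):

```python
from flaml import AutoML
from sklearn.datasets import fetch_california_housing

# Initialize an AutoML instance
automl = AutoML()
# Illustrative settings; time_budget and metric are placeholders
automl_settings = {
    "time_budget": 10,  # seconds
    "metric": "r2",
    "task": "regression",
    "log_file_name": "test/boston.log",
}
X_train, y_train = fetch_california_housing(return_X_y=True)
# Train with labeled input data
automl.fit(X_train=X_train, y_train=y_train, **automl_settings)
print(automl.model)
```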
diff --git a/flaml/automl.py b/flaml/automl.py
index d5441f23d..1b117a8ca 100644
--- a/flaml/automl.py
+++ b/flaml/automl.py
@@ -36,7 +36,7 @@ from .config import (
N_SPLITS,
SAMPLE_MULTIPLY_FACTOR,
)
-from .data import concat
+from .data import concat, CLASSIFICATION
from . import tune
from .training_log import training_log_reader, training_log_writer
@@ -619,7 +619,8 @@ class AutoML:
if issparse(X_train_all):
X_train_all = X_train_all.tocsr()
if (
- self._state.task in ("binary", "multi")
+ self._state.task in CLASSIFICATION
+ and self._auto_augment
and self._state.fit_kwargs.get("sample_weight") is None
and self._split_type not in ["time", "group"]
):
@@ -725,7 +726,7 @@ class AutoML:
y_train, y_val = y_train_all[train_idx], y_train_all[val_idx]
self._state.groups = self._state.groups_all[train_idx]
self._state.groups_val = self._state.groups_all[val_idx]
- elif self._state.task in ("binary", "multi"):
+ elif self._state.task in CLASSIFICATION:
# for classification, make sure the labels are complete in both
# training and validation data
label_set, first = np.unique(y_train_all, return_index=True)
@@ -904,10 +905,11 @@ class AutoML:
n_splits=N_SPLITS,
split_type=None,
groups=None,
- n_jobs=1,
+ n_jobs=-1,
train_best=True,
train_full=False,
record_id=-1,
+ auto_augment=True,
**fit_kwargs,
):
"""Retrain from log file
@@ -943,7 +945,8 @@ class AutoML:
groups: None or array-like | Group labels (with matching length to
y_train) or groups counts (with sum equal to length of y_train)
for training data.
- n_jobs: An integer of the number of threads for training.
+ n_jobs: An integer of the number of threads for training. Use all
+ available resources when n_jobs == -1.
train_best: A boolean of whether to train the best config in the
time budget; if false, train the last config in the budget.
train_full: A boolean of whether to train on the full data. If true,
@@ -952,6 +955,8 @@ class AutoML:
be retrained. By default `record_id = -1` which means this will be
ignored. `record_id = 0` corresponds to the first trial, and
when `record_id >= 0`, `time_budget` will be ignored.
+ auto_augment: boolean, default=True | Whether to automatically
+ augment rare classes.
**fit_kwargs: Other key word arguments to pass to fit() function of
the searched learners, such as sample_weight.
"""
@@ -1018,6 +1023,7 @@ class AutoML:
elif eval_method == "auto":
eval_method = self._decide_eval_method(time_budget)
self.modelcount = 0
+ self._auto_augment = auto_augment
self._prepare_data(eval_method, split_ratio, n_splits)
self._state.time_budget = None
self._state.n_jobs = n_jobs
@@ -1032,7 +1038,7 @@ class AutoML:
self._state.task = get_classification_objective(
len(np.unique(self._y_train_all))
)
- if self._state.task in ("binary", "multi"):
+ if self._state.task in CLASSIFICATION:
assert split_type in [None, "stratified", "uniform", "time", "group"]
self._split_type = (
split_type or self._state.groups is None and "stratified" or "group"
@@ -1191,7 +1197,7 @@ class AutoML:
Returns:
A float for the minimal sample size or None
"""
- return MIN_SAMPLE_TRAIN if self._sample else None
+ return self._min_sample_size if self._sample else None
@property
def max_resource(self) -> Optional[float]:
@@ -1282,7 +1288,7 @@ class AutoML:
sample_weight_val=None,
groups_val=None,
groups=None,
- verbose=1,
+ verbose=3,
retrain_full=True,
split_type=None,
learner_selector="sample",
@@ -1291,8 +1297,10 @@ class AutoML:
seed=None,
n_concurrent_trials=1,
keep_search_state=False,
- append_log=False,
early_stop=False,
+ append_log=False,
+ auto_augment=True,
+ min_sample_size=MIN_SAMPLE_TRAIN,
**fit_kwargs,
):
"""Find a model for a given task
@@ -1375,7 +1383,7 @@ class AutoML:
groups: None or array-like | Group labels (with matching length to
y_train) or groups counts (with sum equal to length of y_train)
for training data.
- verbose: int, default=1 | Controls the verbosity, higher means more
+ verbose: int, default=3 | Controls the verbosity, higher means more
messages.
retrain_full: bool or str, default=True | whether to retrain the
selected model on the full training data when using holdout.
@@ -1412,8 +1420,12 @@ class AutoML:
saving.
early_stop: boolean, default=False | Whether to stop early if the
search is considered to converge.
- append_log: boolean, default=False | whetehr to directly append the log
+ append_log: boolean, default=False | Whether to directly append the log
records to the input log file if it exists.
+ auto_augment: boolean, default=True | Whether to automatically
+ augment rare classes.
+ min_sample_size: int, default=MIN_SAMPLE_TRAIN | the minimal sample
+ size when sample=True.
**fit_kwargs: Other key word arguments to pass to fit() function of
the searched learners, such as sample_weight. Include period as
a key word argument for 'forecast' task.
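And the corresponding sketch for `fit()`, passing the two new keyword arguments (`X`, `y` assumed to be a loaded classification dataset; values are illustrative):

```python
automl.fit(
    X_train=X, y_train=y,
    task="classification",
    time_budget=60,
    sample=True,
    min_sample_size=10000,  # new: replaces the fixed MIN_SAMPLE_TRAIN floor
    auto_augment=False,     # new: disable automatic augmentation of rare classes
)
```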
@@ -1435,8 +1447,8 @@ class AutoML:
self._learner_selector = learner_selector
old_level = logger.getEffectiveLevel()
self.verbose = verbose
- if verbose == 0:
- logger.setLevel(logging.WARNING)
+ # if verbose == 0:
+ logger.setLevel(50 - verbose * 10)
if (not mlflow or not mlflow.active_run()) and not logger.handlers:
# Add the console handler.
_ch = logging.StreamHandler()
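The replaced branch maps `verbose` linearly onto logging levels instead of special-casing `verbose == 0`; a quick sketch of the mapping implied by `50 - verbose * 10`:

```python
import logging

# verbose=0 -> 50 (CRITICAL, effectively silent)
# verbose=1 -> 40 (ERROR)
# verbose=2 -> 30 (WARNING)
# verbose=3 -> 20 (INFO, the new default)
# verbose=4 -> 10 (DEBUG)
for verbose in range(5):
    print(verbose, logging.getLevelName(50 - verbose * 10))
```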
@@ -1457,12 +1469,14 @@ class AutoML:
and (eval_method == "holdout" and self._state.X_val is None)
or (eval_method == "cv")
)
+ self._auto_augment = auto_augment
+ self._min_sample_size = min_sample_size
self._prepare_data(eval_method, split_ratio, n_splits)
self._sample = (
sample
and task != "rank"
and eval_method != "cv"
- and (MIN_SAMPLE_TRAIN * SAMPLE_MULTIPLY_FACTOR < self._state.data_size)
+ and (self._min_sample_size * SAMPLE_MULTIPLY_FACTOR < self._state.data_size)
)
if "auto" == metric:
if "binary" in self._state.task:
@@ -1584,8 +1598,8 @@ class AutoML:
for state in self._search_states.values():
if state.trained_estimator:
del state.trained_estimator
- if verbose == 0:
- logger.setLevel(old_level)
+ # if verbose == 0:
+ logger.setLevel(old_level)
def _search_parallel(self):
try:
@@ -1631,6 +1645,8 @@ class AutoML:
points_to_evaluate=points_to_evaluate,
)
else:
+ self._state.time_from_start = time.time() - self._start_time_flag
+ time_left = self._state.time_budget - self._state.time_from_start
search_alg = SearchAlgo(
metric="val_loss",
space=space,
@@ -1645,13 +1661,9 @@ class AutoML:
],
metric_constraints=self.metric_constraints,
seed=self._seed,
+ time_budget_s=time_left,
)
search_alg = ConcurrencyLimiter(search_alg, self._n_concurrent_trials)
- self._state.time_from_start = time.time() - self._start_time_flag
- time_left = self._state.time_budget - self._state.time_from_start
- search_alg.set_search_properties(
- None, None, config={"time_budget_s": time_left}
- )
resources_per_trial = (
{"cpu": self._state.n_jobs} if self._state.n_jobs > 1 else None
)
@@ -1782,7 +1794,7 @@ class AutoML:
search_space = search_state.search_space
if self._sample:
prune_attr = "FLAML_sample_size"
- min_resource = MIN_SAMPLE_TRAIN
+ min_resource = self._min_sample_size
max_resource = self._state.data_size
else:
prune_attr = min_resource = max_resource = None
@@ -1840,10 +1852,10 @@ class AutoML:
else:
search_space = None
if self._hpo_method in ("bs", "cfo", "cfocat"):
- search_state.search_alg.set_search_properties(
+ search_state.search_alg.searcher.set_search_properties(
metric=None,
mode=None,
- config={
+ setting={
"metric_target": self._state.best_loss,
},
)
@@ -1852,7 +1864,7 @@ class AutoML:
search_state.training_function,
search_alg=search_state.search_alg,
time_budget_s=min(budget_left, self._state.train_time_limit),
- verbose=max(self.verbose - 1, 0),
+ verbose=max(self.verbose - 3, 0),
use_ray=False,
)
time_used = time.time() - start_run_time
@@ -2077,7 +2089,7 @@ class AutoML:
logger.info(estimators)
if len(estimators) <= 1:
return
- if self._state.task in ("binary", "multi"):
+ if self._state.task in CLASSIFICATION:
from sklearn.ensemble import StackingClassifier as Stacker
else:
from sklearn.ensemble import StackingRegressor as Stacker
@@ -2184,7 +2196,7 @@ class AutoML:
speed = delta_loss / delta_time
if speed:
estimated_cost = max(2 * gap / speed, estimated_cost)
- estimated_cost == estimated_cost or 1e-10
+ estimated_cost = estimated_cost or 1e-9
inv.append(1 / estimated_cost)
else:
estimated_cost = self._eci[i]
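The guard before `inv.append(1 / estimated_cost)` relies on Python's `or` fallback to keep a zero cost from being inverted; a tiny illustration of the idiom:

```python
# `x or fallback` returns fallback when x is falsy (0 or 0.0), so a zero
# estimated cost becomes a small positive floor before division.
estimated_cost = 0.0
estimated_cost = estimated_cost or 1e-9
inv = 1 / estimated_cost  # 1e9, instead of raising ZeroDivisionError
```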
diff --git a/flaml/data.py b/flaml/data.py
index cf278c2d3..258f10b9b 100644
--- a/flaml/data.py
+++ b/flaml/data.py
@@ -1,15 +1,18 @@
"""!
- * Copyright (c) 2020-2021 Microsoft Corporation. All rights reserved.
+ * Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License.
"""
import numpy as np
from scipy.sparse import vstack, issparse
import pandas as pd
+
from .training_log import training_log_reader
from datetime import datetime
+CLASSIFICATION = ("binary", "multi", "classification")
+
def load_openml_dataset(
dataset_id, data_dir=None, random_state=0, dataset_format="dataframe"
@@ -300,11 +303,7 @@ class DataTransformer:
)
self._drop = drop
- if task in (
- "binary",
- "multi",
- "classification",
- ) or not pd.api.types.is_numeric_dtype(y):
+ if task in CLASSIFICATION or not pd.api.types.is_numeric_dtype(y):
from sklearn.preprocessing import LabelEncoder
self.label_transformer = LabelEncoder()
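The new module-level constant replaces the `("binary", "multi")` tuples scattered through the codebase, so membership checks also cover the generic `"classification"` task name:

```python
from flaml.data import CLASSIFICATION

assert "binary" in CLASSIFICATION
assert "multi" in CLASSIFICATION
assert "classification" in CLASSIFICATION
assert "regression" not in CLASSIFICATION
```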
diff --git a/flaml/ml.py b/flaml/ml.py
index 19ff9c26c..b2d149565 100644
--- a/flaml/ml.py
+++ b/flaml/ml.py
@@ -33,7 +33,7 @@ from .model import (
ARIMA,
SARIMAX,
)
-from .data import group_counts
+from .data import CLASSIFICATION, group_counts
import logging
@@ -301,7 +301,7 @@ def evaluate_model_CV(
valid_fold_num = total_fold_num = 0
n = kf.get_n_splits()
X_train_split, y_train_split = X_train_all, y_train_all
- if task in ("binary", "multi"):
+ if task in CLASSIFICATION:
labels = np.unique(y_train_all)
else:
labels = None
diff --git a/flaml/model.py b/flaml/model.py
index cdce9812a..6488c56d7 100644
--- a/flaml/model.py
+++ b/flaml/model.py
@@ -13,11 +13,11 @@ from lightgbm import LGBMClassifier, LGBMRegressor, LGBMRanker
from scipy.sparse import issparse
import pandas as pd
from . import tune
-from .data import group_counts
+from .data import group_counts, CLASSIFICATION
import logging
-logger = logging.getLogger(__name__)
+logger = logging.getLogger("flaml.automl")
class BaseEstimator:
@@ -30,24 +30,23 @@ class BaseEstimator:
for both regression and classification
"""
- def __init__(self, task="binary", **params):
+ def __init__(self, task="binary", **config):
"""Constructor
Args:
task: A string of the task type, one of
'binary', 'multi', 'regression', 'rank', 'forecast'
- n_jobs: An integer of the number of parallel threads
- params: A dictionary of the hyperparameter names and values
+ config: A dictionary of the hyperparameter names and values;
+ it may also include 'n_jobs', the number of parallel threads.
"""
- self.params = params
+ self.params = self.config2params(config)
self.estimator_class = self._model = None
self._task = task
- if "_estimator_type" in params:
- self._estimator_type = params["_estimator_type"]
- del self.params["_estimator_type"]
+ if "_estimator_type" in config:
+ self._estimator_type = self.params.pop("_estimator_type")
else:
self._estimator_type = (
- "classifier" if task in ("binary", "multi") else "regressor"
+ "classifier" if task in CLASSIFICATION else "regressor"
)
def get_params(self, deep=False):
@@ -83,8 +82,9 @@ class BaseEstimator:
current_time = time.time()
if "groups" in kwargs:
kwargs = kwargs.copy()
+ groups = kwargs.pop("groups")
if self._task == "rank":
- kwargs["group"] = group_counts(kwargs["groups"])
+ kwargs["group"] = group_counts(groups)
# groups_val = kwargs.get('groups_val')
# if groups_val is not None:
# kwargs['eval_group'] = [group_counts(groups_val)]
@@ -92,10 +92,13 @@ class BaseEstimator:
# (kwargs['X_val'], kwargs['y_val'])]
# kwargs['verbose'] = False
# del kwargs['groups_val'], kwargs['X_val'], kwargs['y_val']
- del kwargs["groups"]
X_train = self._preprocess(X_train)
model = self.estimator_class(**self.params)
+ if logger.level == logging.DEBUG:
+ logger.debug(f"flaml.model - {model} fit started")
model.fit(X_train, y_train, **kwargs)
+ if logger.level == logging.DEBUG:
+ logger.debug(f"flaml.model - {model} fit finished")
train_time = time.time() - current_time
self._model = model
return train_time
@@ -143,9 +146,8 @@ class BaseEstimator:
Each element at (i,j) is the probability for instance i to be in
class j
"""
- assert self._task in (
- "binary",
- "multi",
+ assert (
+ self._task in CLASSIFICATION
), "predict_prob() only for classification task."
X_test = self._preprocess(X_test)
return self._model.predict_proba(X_test)
@@ -160,9 +162,10 @@ class BaseEstimator:
Returns:
A dictionary of the search space.
Each key is the name of a hyperparameter, and value is a dict with
- its domain and init_value (optional), cat_hp_cost (optional)
+ its domain (required) and low_cost_init_value, init_value,
+ cat_hp_cost (if applicable).
e.g.,
- {'domain': tune.randint(lower=1, upper=10), 'init_value': 1}
+ {'domain': tune.randint(lower=1, upper=10), 'init_value': 1}.
"""
return {}
@@ -171,11 +174,11 @@ class BaseEstimator:
"""[optional method] memory size of the estimator in bytes
Args:
- config - the dict of the hyperparameter config
+ config - A dict of the hyperparameter config.
Returns:
A float of the memory size required by the estimator to train the
- given config
+ given config.
"""
return 1.0
@@ -189,10 +192,21 @@ class BaseEstimator:
"""[optional method] initialize the class"""
pass
+ def config2params(self, config: dict) -> dict:
+ """[optional method] config dict to params dict
+
+ Args:
+ config - A dict of the hyperparameter config.
+
+ Returns:
+ A dict that will be passed to self.estimator_class's constructor.
+ """
+ return config.copy()
+
class SKLearnEstimator(BaseEstimator):
- def __init__(self, task="binary", **params):
- super().__init__(task, **params)
+ def __init__(self, task="binary", **config):
+ super().__init__(task, **config)
def _preprocess(self, X):
if isinstance(X, pd.DataFrame):
@@ -255,39 +269,22 @@ class LGBMEstimator(BaseEstimator):
},
}
+ def config2params(cls, config: dict) -> dict:
+ params = config.copy()
+ if "log_max_bin" in params:
+ params["max_bin"] = (1 << params.pop("log_max_bin")) - 1
+ return params
+
@classmethod
def size(cls, config):
num_leaves = int(round(config.get("num_leaves") or config["max_leaves"]))
n_estimators = int(round(config["n_estimators"]))
return (num_leaves * 3 + (num_leaves - 1) * 4 + 1.0) * n_estimators * 8
- def __init__(self, task="binary", log_max_bin=8, **params):
- super().__init__(task, **params)
- if "objective" not in self.params:
- # Default: ‘regression’ for LGBMRegressor,
- # ‘binary’ or ‘multiclass’ for LGBMClassifier
- objective = "regression"
- if "binary" in task:
- objective = "binary"
- elif "multi" in task:
- objective = "multiclass"
- elif "rank" == task:
- objective = "lambdarank"
- self.params["objective"] = objective
- if "n_estimators" in self.params:
- self.params["n_estimators"] = int(round(self.params["n_estimators"]))
- if "num_leaves" in self.params:
- self.params["num_leaves"] = int(round(self.params["num_leaves"]))
- if "min_child_samples" in self.params:
- self.params["min_child_samples"] = int(
- round(self.params["min_child_samples"])
- )
- if "max_bin" not in self.params:
- self.params["max_bin"] = 1 << int(round(log_max_bin)) - 1
+ def __init__(self, task="binary", **config):
+ super().__init__(task, **config)
if "verbose" not in self.params:
self.params["verbose"] = -1
- # if "subsample_freq" not in self.params:
- # self.params['subsample_freq'] = 1
if "regression" == task:
self.estimator_class = LGBMRegressor
elif "rank" == task:
@@ -355,7 +352,7 @@ class LGBMEstimator(BaseEstimator):
if self.params["n_estimators"] > 0:
self._fit(X_train, y_train, **kwargs)
else:
- self.params["n_estimators"] = n_iter
+ self.params["n_estimators"] = self._model.n_estimators
train_time = time.time() - start_time
return train_time
@@ -415,56 +412,30 @@ class XGBoostEstimator(SKLearnEstimator):
def cost_relative2lgbm(cls):
return 1.6
+ def config2params(cls, config: dict) -> dict:
+ params = config.copy()
+ params["max_depth"] = params.get("max_depth", 0)
+ params["grow_policy"] = params.get("grow_policy", "lossguide")
+ params["booster"] = params.get("booster", "gbtree")
+ params["use_label_encoder"] = params.get("use_label_encoder", False)
+ params["tree_method"] = params.get("tree_method", "hist")
+ if "n_jobs" in config:
+ params["nthread"] = params.pop("n_jobs")
+ return params
+
def __init__(
self,
task="regression",
- all_thread=False,
- n_jobs=1,
- n_estimators=4,
- max_leaves=4,
- subsample=1.0,
- min_child_weight=1,
- learning_rate=0.1,
- reg_lambda=1.0,
- reg_alpha=0.0,
- colsample_bylevel=1.0,
- colsample_bytree=1.0,
- tree_method="auto",
- **params,
+ **config,
):
- super().__init__(task, **params)
- self._n_estimators = int(round(n_estimators))
- self.params.update(
- {
- "max_leaves": int(round(max_leaves)),
- "max_depth": params.get("max_depth", 0),
- "grow_policy": params.get("grow_policy", "lossguide"),
- "tree_method": tree_method,
- "verbosity": params.get("verbosity", 0),
- "nthread": n_jobs,
- "learning_rate": float(learning_rate),
- "subsample": float(subsample),
- "reg_alpha": float(reg_alpha),
- "reg_lambda": float(reg_lambda),
- "min_child_weight": float(min_child_weight),
- "booster": params.get("booster", "gbtree"),
- "colsample_bylevel": float(colsample_bylevel),
- "colsample_bytree": float(colsample_bytree),
- "objective": params.get("objective"),
- }
- )
- if all_thread:
- del self.params["nthread"]
-
- def get_params(self, deep=False):
- params = super().get_params()
- params["n_jobs"] = params["nthread"]
- return params
+ super().__init__(task, **config)
+ self.params["verbosity"] = 0
def fit(self, X_train, y_train, budget=None, **kwargs):
start_time = time.time()
- if not issparse(X_train):
- self.params["tree_method"] = "hist"
+ if issparse(X_train):
+ self.params["tree_method"] = "auto"
+ else:
X_train = self._preprocess(X_train)
if "sample_weight" in kwargs:
dtrain = xgb.DMatrix(X_train, label=y_train, weight=kwargs["sample_weight"])
@@ -478,8 +449,10 @@ class XGBoostEstimator(SKLearnEstimator):
obj = objective
if "objective" in self.params:
del self.params["objective"]
- self._model = xgb.train(self.params, dtrain, self._n_estimators, obj=obj)
+ _n_estimators = self.params.pop("n_estimators")
+ self._model = xgb.train(self.params, dtrain, _n_estimators, obj=obj)
self.params["objective"] = objective
+ self.params["n_estimators"] = _n_estimators
del dtrain
train_time = time.time() - start_time
return train_time
@@ -502,54 +475,29 @@ class XGBoostSklearnEstimator(SKLearnEstimator, LGBMEstimator):
def cost_relative2lgbm(cls):
return XGBoostEstimator.cost_relative2lgbm()
+ def config2params(cls, config: dict) -> dict:
+ params = config.copy()
+ params["max_depth"] = 0
+ params["grow_policy"] = params.get("grow_policy", "lossguide")
+ params["booster"] = params.get("booster", "gbtree")
+ params["use_label_encoder"] = params.get("use_label_encoder", False)
+ params["tree_method"] = params.get("tree_method", "hist")
+ return params
+
def __init__(
self,
task="binary",
- n_jobs=1,
- n_estimators=4,
- max_leaves=4,
- subsample=1.0,
- min_child_weight=1,
- learning_rate=0.1,
- reg_lambda=1.0,
- reg_alpha=0.0,
- colsample_bylevel=1.0,
- colsample_bytree=1.0,
- tree_method="hist",
- **params,
+ **config,
):
- super().__init__(task, **params)
- del self.params["objective"]
- del self.params["max_bin"]
+ super().__init__(task, **config)
del self.params["verbose"]
- self.params.update(
- {
- "n_estimators": int(round(n_estimators)),
- "max_leaves": int(round(max_leaves)),
- "max_depth": 0,
- "grow_policy": params.get("grow_policy", "lossguide"),
- "tree_method": tree_method,
- "n_jobs": n_jobs,
- "verbosity": 0,
- "learning_rate": float(learning_rate),
- "subsample": float(subsample),
- "reg_alpha": float(reg_alpha),
- "reg_lambda": float(reg_lambda),
- "min_child_weight": float(min_child_weight),
- "booster": params.get("booster", "gbtree"),
- "colsample_bylevel": float(colsample_bylevel),
- "colsample_bytree": float(colsample_bytree),
- "use_label_encoder": params.get("use_label_encoder", False),
- }
- )
+ self.params["verbosity"] = 0
self.estimator_class = xgb.XGBRegressor
if "rank" == task:
self.estimator_class = xgb.XGBRanker
- elif task in ("binary", "multi"):
+ elif task in CLASSIFICATION:
self.estimator_class = xgb.XGBClassifier
- self._time_per_iter = None
- self._train_size = 0
def fit(self, X_train, y_train, budget=None, **kwargs):
if issparse(X_train):
@@ -578,7 +526,7 @@ class RandomForestEstimator(SKLearnEstimator, LGBMEstimator):
"low_cost_init_value": 4,
},
}
- if task in ("binary", "multi"):
+ if task in CLASSIFICATION:
space["criterion"] = {
"domain": tune.choice(["gini", "entropy"]),
# 'init_value': 'gini',
@@ -589,36 +537,24 @@ class RandomForestEstimator(SKLearnEstimator, LGBMEstimator):
def cost_relative2lgbm(cls):
return 2.0
+ def config2params(cls, config: dict) -> dict:
+ params = config.copy()
+ if "max_leaves" in params:
+ params["max_leaf_nodes"] = params.get(
+ "max_leaf_nodes", params.pop("max_leaves")
+ )
+ return params
+
def __init__(
self,
task="binary",
- n_jobs=1,
- n_estimators=4,
- max_features=1.0,
- criterion="gini",
- max_leaves=4,
**params,
):
super().__init__(task, **params)
- del self.params["objective"]
- del self.params["max_bin"]
- self.params.update(
- {
- "n_estimators": int(round(n_estimators)),
- "n_jobs": n_jobs,
- "verbose": 0,
- "max_features": float(max_features),
- "max_leaf_nodes": params.get("max_leaf_nodes", int(round(max_leaves))),
- }
- )
+ self.params["verbose"] = 0
self.estimator_class = RandomForestRegressor
- if task in ("binary", "multi"):
+ if task in CLASSIFICATION:
self.estimator_class = RandomForestClassifier
- self.params["criterion"] = criterion
-
- def get_params(self, deep=False):
- params = super().get_params()
- return params
class ExtraTreeEstimator(RandomForestEstimator):
@@ -648,21 +584,16 @@ class LRL1Classifier(SKLearnEstimator):
def cost_relative2lgbm(cls):
return 160
- def __init__(self, task="binary", n_jobs=1, tol=0.0001, C=1.0, **params):
- super().__init__(task, **params)
- self.params.update(
- {
- "penalty": params.get("penalty", "l1"),
- "tol": float(tol),
- "C": float(C),
- "solver": params.get("solver", "saga"),
- "n_jobs": n_jobs,
- }
- )
- assert task in (
- "binary",
- "multi",
- ), "LogisticRegression for classification task only"
+ def config2params(cls, config: dict) -> dict:
+ params = config.copy()
+ params["tol"] = params.get("tol", 0.0001)
+ params["solver"] = params.get("solver", "saga")
+ params["penalty"] = params.get("penalty", "l1")
+ return params
+
+ def __init__(self, task="binary", **config):
+ super().__init__(task, **config)
+ assert task in CLASSIFICATION, "LogisticRegression for classification task only"
self.estimator_class = LogisticRegression
@@ -675,21 +606,16 @@ class LRL2Classifier(SKLearnEstimator):
def cost_relative2lgbm(cls):
return 25
- def __init__(self, task="binary", n_jobs=1, tol=0.0001, C=1.0, **params):
- super().__init__(task, **params)
- self.params.update(
- {
- "penalty": params.get("penalty", "l2"),
- "tol": float(tol),
- "C": float(C),
- "solver": params.get("solver", "lbfgs"),
- "n_jobs": n_jobs,
- }
- )
- assert task in (
- "binary",
- "multi",
- ), "LogisticRegression for classification task only"
+ def config2params(cls, config: dict) -> dict:
+ params = config.copy()
+ params["tol"] = params.get("tol", 0.0001)
+ params["solver"] = params.get("solver", "lbfgs")
+ params["penalty"] = params.get("penalty", "l2")
+ return params
+
+ def __init__(self, task="binary", **config):
+ super().__init__(task, **config)
+ assert task in CLASSIFICATION, "LogisticRegression for classification task only"
self.estimator_class = LogisticRegression
@@ -749,39 +675,33 @@ class CatBoostEstimator(BaseEstimator):
X = X.to_numpy()
return X
+ def config2params(cls, config: dict) -> dict:
+ params = config.copy()
+ params["n_estimators"] = params.get("n_estimators", 8192)
+ if "n_jobs" in params:
+ params["thread_count"] = params.pop("n_jobs")
+ return params
+
def __init__(
self,
task="binary",
- n_jobs=1,
- n_estimators=8192,
- learning_rate=0.1,
- early_stopping_rounds=4,
- **params,
+ **config,
):
- super().__init__(task, **params)
+ super().__init__(task, **config)
self.params.update(
{
- "early_stopping_rounds": int(round(early_stopping_rounds)),
- "n_estimators": n_estimators,
- "learning_rate": learning_rate,
- "thread_count": n_jobs,
- "verbose": params.get("verbose", False),
- "random_seed": params.get("random_seed", 10242048),
+ "verbose": config.get("verbose", False),
+ "random_seed": config.get("random_seed", 10242048),
}
)
from catboost import CatBoostRegressor
self.estimator_class = CatBoostRegressor
- if task in ("binary", "multi"):
+ if task in CLASSIFICATION:
from catboost import CatBoostClassifier
self.estimator_class = CatBoostClassifier
- def get_params(self, deep=False):
- params = super().get_params()
- params["n_jobs"] = params["thread_count"]
- return params
-
def fit(self, X_train, y_train, budget=None, **kwargs):
import shutil
@@ -881,7 +801,7 @@ class CatBoostEstimator(BaseEstimator):
kwargs["sample_weight"] = weight
self._model = model
else:
- self.params["n_estimators"] = n_iter
+ self.params["n_estimators"] = self._model.tree_count_
# except CatBoostError:
# self._model = None
train_time = time.time() - start_time
@@ -904,22 +824,21 @@ class KNeighborsEstimator(BaseEstimator):
def cost_relative2lgbm(cls):
return 30
- def __init__(self, task="binary", n_jobs=1, n_neighbors=5, **params):
- super().__init__(task, **params)
- self.params.update(
- {
- "n_neighbors": int(round(n_neighbors)),
- "weights": params.get("weights", "distance"),
- "n_jobs": n_jobs,
- }
- )
- from sklearn.neighbors import KNeighborsRegressor
+ def config2params(cls, config: dict) -> dict:
+ params = config.copy()
+ params["weights"] = params.get("weights", "distance")
+ return params
- self.estimator_class = KNeighborsRegressor
- if task in ("binary", "multi"):
+ def __init__(self, task="binary", **config):
+ super().__init__(task, **config)
+ if task in CLASSIFICATION:
from sklearn.neighbors import KNeighborsClassifier
self.estimator_class = KNeighborsClassifier
+ else:
+ from sklearn.neighbors import KNeighborsRegressor
+
+ self.estimator_class = KNeighborsRegressor
def _preprocess(self, X):
if isinstance(X, pd.DataFrame):
@@ -963,9 +882,7 @@ class Prophet(BaseEstimator):
}
return space
- def __init__(self, task="forecast", **params):
- if "n_jobs" in params:
- params.pop("n_jobs")
+ def __init__(self, task="forecast", n_jobs=1, **params):
super().__init__(task, **params)
def _join(self, X_train, y_train):
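The recurring refactor in this file routes every searched hyperparameter through the new `config2params` hook: the base constructor calls it once, and subclasses translate search-space names into estimator constructor arguments there. A minimal sketch of a custom estimator following the pattern (the class, its search space, and the signature details are hypothetical):

```python
from flaml.model import SKLearnEstimator
from flaml import tune

class MyEstimator(SKLearnEstimator):
    """Hypothetical estimator illustrating the config2params hook."""

    @classmethod
    def search_space(cls, data_size, **params):
        return {
            "log_max_bin": {
                "domain": tune.randint(lower=3, upper=11),
                "init_value": 8,
            },
        }

    def config2params(self, config: dict) -> dict:
        params = config.copy()
        # Translate the searched name into the constructor's name, mirroring
        # LGBMEstimator's log_max_bin -> max_bin conversion above.
        if "log_max_bin" in params:
            params["max_bin"] = (1 << params.pop("log_max_bin")) - 1
        return params
```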
diff --git a/flaml/nlp/hpo/searchalgo_auto.py b/flaml/nlp/hpo/searchalgo_auto.py
index 4a180cfac..441150026 100644
--- a/flaml/nlp/hpo/searchalgo_auto.py
+++ b/flaml/nlp/hpo/searchalgo_auto.py
@@ -13,7 +13,7 @@ SEARCH_ALGO_MAPPING = OrderedDict(
("bs", BlendSearch),
("grid", None),
("gridbert", None),
- ("rs", None)
+ ("rs", None),
]
)
@@ -35,14 +35,16 @@ class AutoSearchAlgorithm:
)
@classmethod
- def from_method_name(cls,
- search_algo_name,
- search_algo_args_mode,
- hpo_search_space,
- time_budget,
- metric_name,
- metric_mode_name,
- **custom_hpo_args):
+ def from_method_name(
+ cls,
+ search_algo_name,
+ search_algo_args_mode,
+ hpo_search_space,
+ time_budget,
+ metric_name,
+ metric_mode_name,
+ **custom_hpo_args
+ ):
"""
Instantiating one of the search algorithm classes based on the search algorithm name, search algorithm
argument mode, hpo search space and other keyword args
@@ -68,7 +70,9 @@ class AutoSearchAlgorithm:
{"points_to_evaluate": [{"learning_rate": 1e-5, "num_train_epochs": 10}])
"""
- assert hpo_search_space, "hpo_search_space needs to be specified for calling AutoSearchAlgorithm.from_method_name"
+ assert (
+ hpo_search_space
+ ), "hpo_search_space needs to be specified for calling AutoSearchAlgorithm.from_method_name"
if not search_algo_name:
# TODO coverage
search_algo_name = "grid"
@@ -83,9 +87,15 @@ class AutoSearchAlgorithm:
of the constructor function
"""
this_search_algo_kwargs = None
- allowed_arguments = SEARCH_ALGO_MAPPING[search_algo_name].__init__.__code__.co_varnames
- allowed_custom_args = {key: custom_hpo_args[key] for key in custom_hpo_args.keys() if
- key in allowed_arguments}
+ allowed_arguments = SEARCH_ALGO_MAPPING[
+ search_algo_name
+ ].__init__.__code__.co_varnames
+ custom_hpo_args["time_budget_s"] = time_budget
+ allowed_custom_args = {
+ key: custom_hpo_args[key]
+ for key in custom_hpo_args.keys()
+ if key in allowed_arguments
+ }
"""
If the search_algo_args_mode is "dft", set the args to the default args, e.g., the default args for
@@ -94,26 +104,34 @@ class AutoSearchAlgorithm:
"""
if search_algo_args_mode == "dft":
# TODO coverage
- this_search_algo_kwargs = DEFAULT_SEARCH_ALGO_ARGS_MAPPING[search_algo_name](
+ this_search_algo_kwargs = DEFAULT_SEARCH_ALGO_ARGS_MAPPING[
+ search_algo_name
+ ](
"dft",
metric_name,
metric_mode_name,
hpo_search_space=hpo_search_space,
- **allowed_custom_args)
+ **allowed_custom_args
+ )
elif search_algo_args_mode == "cus":
- this_search_algo_kwargs = DEFAULT_SEARCH_ALGO_ARGS_MAPPING[search_algo_name](
+ this_search_algo_kwargs = DEFAULT_SEARCH_ALGO_ARGS_MAPPING[
+ search_algo_name
+ ](
"cus",
metric_name,
metric_mode_name,
hpo_search_space=hpo_search_space,
- **allowed_custom_args)
+ **allowed_custom_args
+ )
"""
returning the hpo algorithm with the arguments
"""
- search_algo = SEARCH_ALGO_MAPPING[search_algo_name](**this_search_algo_kwargs)
+ search_algo = SEARCH_ALGO_MAPPING[search_algo_name](
+ **this_search_algo_kwargs
+ )
if search_algo_name == "bs":
- search_algo.set_search_properties(config={"time_budget_s": time_budget})
+ search_algo.set_search_properties()
return search_algo
raise ValueError(
"Unrecognized method {} for this kind of AutoSearchAlgorithm: {}.\n"
@@ -125,29 +143,39 @@ class AutoSearchAlgorithm:
@staticmethod
def grid2list(grid_config):
# TODO coverage
- key_val_list = [[(key, each_val) for each_val in val_list['grid_search']]
- for (key, val_list) in grid_config.items()]
+ key_val_list = [
+ [(key, each_val) for each_val in val_list["grid_search"]]
+ for (key, val_list) in grid_config.items()
+ ]
config_list = [dict(x) for x in itertools.product(*key_val_list)]
return config_list
-def get_search_algo_args_optuna(search_args_mode,
- metric_name,
- metric_mode_name,
- hpo_search_space=None,
- **custom_hpo_args):
+def get_search_algo_args_optuna(
+ search_args_mode,
+ metric_name,
+ metric_mode_name,
+ hpo_search_space=None,
+ **custom_hpo_args
+):
# TODO coverage
return {}
-def default_search_algo_args_bs(search_args_mode,
- metric_name,
- metric_mode_name,
- hpo_search_space=None,
- **custom_hpo_args):
- assert hpo_search_space, "hpo_search_space needs to be specified for calling AutoSearchAlgorithm.from_method_name"
- if "num_train_epochs" in hpo_search_space and \
- isinstance(hpo_search_space["num_train_epochs"], ray.tune.sample.Categorical):
+def default_search_algo_args_bs(
+ search_args_mode,
+ metric_name,
+ metric_mode_name,
+ hpo_search_space=None,
+ time_budget_s=None,
+ **custom_hpo_args
+):
+ assert (
+ hpo_search_space
+ ), "hpo_search_space needs to be specified for calling AutoSearchAlgorithm.from_method_name"
+ if "num_train_epochs" in hpo_search_space and isinstance(
+ hpo_search_space["num_train_epochs"], ray.tune.sample.Categorical
+ ):
min_epoch = min(hpo_search_space["num_train_epochs"].categories)
else:
# TODO coverage
@@ -156,31 +184,38 @@ def default_search_algo_args_bs(search_args_mode,
default_search_algo_args = {
"low_cost_partial_config": {
"num_train_epochs": min_epoch,
- "per_device_train_batch_size": max(hpo_search_space["per_device_train_batch_size"].categories),
+ "per_device_train_batch_size": max(
+ hpo_search_space["per_device_train_batch_size"].categories
+ ),
},
"space": hpo_search_space,
"metric": metric_name,
- "mode": metric_mode_name
+ "mode": metric_mode_name,
+ "time_budget_s": time_budget_s,
}
if search_args_mode == "cus":
default_search_algo_args.update(custom_hpo_args)
return default_search_algo_args
-def default_search_algo_args_grid_search(search_args_mode,
- metric_name,
- metric_mode_name,
- hpo_search_space=None,
- **custom_hpo_args):
+def default_search_algo_args_grid_search(
+ search_args_mode,
+ metric_name,
+ metric_mode_name,
+ hpo_search_space=None,
+ **custom_hpo_args
+):
# TODO coverage
return {}
-def default_search_algo_args_random_search(search_args_mode,
- metric_name,
- metric_mode_name,
- hpo_search_space=None,
- **custom_hpo_args):
+def default_search_algo_args_random_search(
+ search_args_mode,
+ metric_name,
+ metric_mode_name,
+ hpo_search_space=None,
+ **custom_hpo_args
+):
# TODO coverage
return {}
@@ -191,6 +226,5 @@ DEFAULT_SEARCH_ALGO_ARGS_MAPPING = OrderedDict(
("cfo", default_search_algo_args_bs),
("bs", default_search_algo_args_bs),
("grid", default_search_algo_args_grid_search),
- ("gridbert", default_search_algo_args_random_search)
]
)
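The net effect of these hunks is that the time budget now enters the searcher at construction time (`time_budget_s`) rather than being injected afterwards via `set_search_properties(config={"time_budget_s": ...})`; a before/after sketch (variable names assumed):

```python
from flaml import BlendSearch

# Before: budget injected after construction
# search_algo = BlendSearch(metric=metric_name, mode=metric_mode_name, space=hpo_search_space)
# search_algo.set_search_properties(config={"time_budget_s": time_budget})

# After: the budget is a constructor argument
search_algo = BlendSearch(
    metric=metric_name,         # assumed defined
    mode=metric_mode_name,      # assumed defined
    space=hpo_search_space,     # assumed defined
    time_budget_s=time_budget,  # assumed defined
)
search_algo.set_search_properties()  # still invoked once, now with no overrides
```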
diff --git a/flaml/onlineml/trial_runner.py b/flaml/onlineml/trial_runner.py
index a5e584e81..d1cc6e542 100644
--- a/flaml/onlineml/trial_runner.py
+++ b/flaml/onlineml/trial_runner.py
@@ -1,10 +1,10 @@
-import time
import numpy as np
import math
from flaml.tune import Trial
from flaml.scheduler import TrialScheduler
import logging
+
logger = logging.getLogger(__name__)
@@ -45,16 +45,18 @@ class OnlineTrialRunner:
Status change routine of a trial
Trial.PENDING -> (Trial.RUNNING -> Trial.PAUSED -> Trial.RUNNING -> ...) -> Trial.TERMINATED(optional)
"""
+
RANDOM_SEED = 123456
WARMSTART_NUM = 100
- def __init__(self,
- max_live_model_num: int,
- searcher=None,
- scheduler=None,
- champion_test_policy='loss_ucb',
- **kwargs
- ):
+ def __init__(
+ self,
+ max_live_model_num: int,
+ searcher=None,
+ scheduler=None,
+ champion_test_policy="loss_ucb",
+ **kwargs
+ ):
"""Constructor
Args:
@@ -64,7 +66,7 @@ class OnlineTrialRunner:
Required methods of the searcher:
- next_trial()
Generate the next trial to add.
- - set_search_properties(metric: Optional[str], mode: Optional[str], config: dict)
+ - set_search_properties(metric: Optional[str], mode: Optional[str], config: Optional[dict], setting: Optional[dict])
Generate new challengers based on the current champion and update the challenger list
- on_trial_result(trial_id: str, result: Dict)
Report results to the scheduler.
@@ -87,8 +89,8 @@ class OnlineTrialRunner:
self._scheduler = scheduler
self._champion_test_policy = champion_test_policy
self._max_live_model_num = max_live_model_num
- self._remove_worse = kwargs.get('remove_worse', True)
- self._bound_trial_num = kwargs.get('bound_trial_num', False)
+ self._remove_worse = kwargs.get("remove_worse", True)
+ self._bound_trial_num = kwargs.get("bound_trial_num", False)
self._no_model_persistence = True
# stores all the trials added to the OnlineTrialRunner
@@ -103,21 +105,19 @@ class OnlineTrialRunner:
# initially schedule up to max_live_model_num of live models and
# set the first trial as the champion (which is done inside self.step())
self._total_steps = 0
- logger.info('init step %s', self._max_live_model_num)
+ logger.info("init step %s", self._max_live_model_num)
# TODO: add more comments
self.step()
assert self._champion_trial is not None
@property
def champion_trial(self) -> Trial:
- """The champion trial
- """
+ """The champion trial"""
return self._champion_trial
@property
def running_trials(self):
- """The running/'live' trials
- """
+ """The running/'live' trials"""
return self._running_trials
def step(self, data_sample=None, prediction_trial_tuple=None):
@@ -147,7 +147,10 @@ class OnlineTrialRunner:
# ***********Update running trials with observation***************************
if data_sample is not None:
self._total_steps += 1
- prediction_made, prediction_trial = prediction_trial_tuple[0], prediction_trial_tuple[1]
+ prediction_made, prediction_trial = (
+ prediction_trial_tuple[0],
+ prediction_trial_tuple[1],
+ )
# assert prediction_trial.status == Trial.RUNNING
trials_to_pause = []
for trial in list(self._running_trials):
@@ -156,16 +159,27 @@ class OnlineTrialRunner:
else:
y_predicted = prediction_made
trial.train_eval_model_online(data_sample, y_predicted)
- logger.debug('running trial at iter %s %s %s %s %s %s', self._total_steps,
- trial.trial_id, trial.result.loss_avg, trial.result.loss_cb,
- trial.result.resource_used, trial.resource_lease)
+ logger.debug(
+ "running trial at iter %s %s %s %s %s %s",
+ self._total_steps,
+ trial.trial_id,
+ trial.result.loss_avg,
+ trial.result.loss_cb,
+ trial.result.resource_used,
+ trial.resource_lease,
+ )
# report result to the searcher
self._searcher.on_trial_result(trial.trial_id, trial.result)
# report result to the scheduler and the scheduler makes a decision about
# the running status of the trial
decision = self._scheduler.on_trial_result(self, trial, trial.result)
# set the status of the trial according to the decision made by the scheduler
- logger.debug('trial decision %s %s at step %s', decision, trial.trial_id, self._total_steps)
+ logger.debug(
+ "trial decision %s %s at step %s",
+ decision,
+ trial.trial_id,
+ self._total_steps,
+ )
if decision == TrialScheduler.STOP:
self.stop_trial(trial)
elif decision == TrialScheduler.PAUSE:
@@ -191,38 +205,45 @@ class OnlineTrialRunner:
else:
break
- def get_top_running_trials(self, top_ratio=None, top_metric='ucb') -> list:
- """Get a list of trial ids, whose performance is among the top running trials
- """
- running_valid_trials = [trial for trial in self._running_trials if
- trial.result is not None]
+ def get_top_running_trials(self, top_ratio=None, top_metric="ucb") -> list:
+ """Get a list of trial ids, whose performance is among the top running trials"""
+ running_valid_trials = [
+ trial for trial in self._running_trials if trial.result is not None
+ ]
if not running_valid_trials:
return
if top_ratio is None:
top_number = 0
elif isinstance(top_ratio, float):
top_number = math.ceil(len(running_valid_trials) * top_ratio)
- elif isinstance(top_ratio, str) and 'best' in top_ratio:
+ elif isinstance(top_ratio, str) and "best" in top_ratio:
top_number = 1
else:
raise NotImplementedError
- if 'ucb' in top_metric:
- test_attribute = 'loss_ucb'
- elif 'avg' in top_metric:
- test_attribute = 'loss_avg'
- elif 'lcb' in top_metric:
- test_attribute = 'loss_lcb'
+ if "ucb" in top_metric:
+ test_attribute = "loss_ucb"
+ elif "avg" in top_metric:
+ test_attribute = "loss_avg"
+ elif "lcb" in top_metric:
+ test_attribute = "loss_lcb"
else:
raise NotImplementedError
top_running_valid_trials = []
- logger.info('Running trial ids %s', [trial.trial_id for trial in running_valid_trials])
+ logger.info(
+ "Running trial ids %s", [trial.trial_id for trial in running_valid_trials]
+ )
self._random_state.shuffle(running_valid_trials)
- results = [trial.result.get_score(test_attribute) for trial in running_valid_trials]
- sorted_index = np.argsort(np.array(results)) # sorted result (small to large) index
+ results = [
+ trial.result.get_score(test_attribute) for trial in running_valid_trials
+ ]
+ # sorted result (small to large) index
+ sorted_index = np.argsort(np.array(results))
for i in range(min(top_number, len(running_valid_trials))):
top_running_valid_trials.append(running_valid_trials[sorted_index[i]])
- logger.info('Top running ids %s', [trial.trial_id for trial in top_running_valid_trials])
+ logger.info(
+ "Top running ids %s", [trial.trial_id for trial in top_running_valid_trials]
+ )
return top_running_valid_trials
def _add_trial_from_searcher(self):
@@ -234,12 +255,25 @@ class OnlineTrialRunner:
"""
# (optionally) upper bound the number of trials in the OnlineTrialRunner
if self._bound_trial_num and self._first_challenger_pool_size is not None:
- active_trial_size = len([t for t in self._trials if t.status != Trial.TERMINATED])
- trial_num_upper_bound = int(round((np.log10(self._total_steps) + 1) * self._first_challenger_pool_size)
- ) if self._first_challenger_pool_size else np.inf
+ active_trial_size = len(
+ [t for t in self._trials if t.status != Trial.TERMINATED]
+ )
+ trial_num_upper_bound = (
+ int(
+ round(
+ (np.log10(self._total_steps) + 1)
+ * self._first_challenger_pool_size
+ )
+ )
+ if self._first_challenger_pool_size
+ else np.inf
+ )
if active_trial_size > trial_num_upper_bound:
- logger.info('Not adding new trials: %s exceeds trial limit %s.',
- active_trial_size, trial_num_upper_bound)
+ logger.info(
+ "Not adding new trials: %s exceeds trial limit %s.",
+ active_trial_size,
+ trial_num_upper_bound,
+ )
return None
# output one trial from the trial pool (new challenger pool) maintained in the searcher
@@ -253,7 +287,7 @@ class OnlineTrialRunner:
# a valid trial is added.
# Assumption on self._searcher: the first trial generated is the champion trial
if self._champion_trial is None:
- logger.info('Initial set up of the champion trial %s', trial.config)
+ logger.info("Initial set up of the champion trial %s", trial.config)
self._set_champion(trial)
else:
self._all_new_challengers_added = True
@@ -261,14 +295,15 @@ class OnlineTrialRunner:
self._first_challenger_pool_size = len(self._trials)
def _champion_test(self):
- """Perform tests again the latest champion, including bette_than tests and worse_than tests
- """
+ """Perform tests again the latest champion, including bette_than tests and worse_than tests"""
# for BetterThan test, we only need to compare the best challenger with the champion
self._get_best_challenger()
if self._best_challenger_trial is not None:
assert self._best_challenger_trial.trial_id != self._champion_trial.trial_id
# test whether a new champion is found and set the trial properties accordingly
- is_new_champion_found = self._better_than_champion_test(self._best_challenger_trial)
+ is_new_champion_found = self._better_than_champion_test(
+ self._best_challenger_trial
+ )
if is_new_champion_found:
self._set_champion(new_champion_trial=self._best_challenger_trial)
@@ -278,39 +313,47 @@ class OnlineTrialRunner:
for trial_to_test in self._trials:
if trial_to_test.status != Trial.TERMINATED:
worse_than_champion = self._worse_than_champion_test(
- self._champion_trial, trial_to_test, self.WARMSTART_NUM)
+ self._champion_trial, trial_to_test, self.WARMSTART_NUM
+ )
if worse_than_champion:
to_stop.append(trial_to_test)
# we want to ensure there are at least #max_live_model_num of challengers remaining
- max_to_stop_num = len([t for t in self._trials if t.status != Trial.TERMINATED]
- ) - self._max_live_model_num
+ max_to_stop_num = (
+ len([t for t in self._trials if t.status != Trial.TERMINATED])
+ - self._max_live_model_num
+ )
for i in range(min(max_to_stop_num, len(to_stop))):
self.stop_trial(to_stop[i])
def _get_best_challenger(self):
- """Get the 'best' (in terms of the champion_test_policy) challenger under consideration.
- """
+ """Get the 'best' (in terms of the champion_test_policy) challenger under consideration."""
if self._champion_test_policy is None:
return
- if 'ucb' in self._champion_test_policy:
- test_attribute = 'loss_ucb'
- elif 'avg' in self._champion_test_policy:
- test_attribute = 'loss_avg'
+ if "ucb" in self._champion_test_policy:
+ test_attribute = "loss_ucb"
+ elif "avg" in self._champion_test_policy:
+ test_attribute = "loss_avg"
else:
raise NotImplementedError
- active_trials = [trial for trial in self._trials if
- (trial.status != Trial.TERMINATED
- and trial.trial_id != self._champion_trial.trial_id
- and trial.result is not None)]
+ active_trials = [
+ trial
+ for trial in self._trials
+ if (
+ trial.status != Trial.TERMINATED
+ and trial.trial_id != self._champion_trial.trial_id
+ and trial.result is not None
+ )
+ ]
if active_trials:
self._random_state.shuffle(active_trials)
- results = [trial.result.get_score(test_attribute) for trial in active_trials]
+ results = [
+ trial.result.get_score(test_attribute) for trial in active_trials
+ ]
best_index = np.argmin(results)
self._best_challenger_trial = active_trials[best_index]
def _set_champion(self, new_champion_trial):
- """Set the status of the existing trials once a new champion is found.
- """
+ """Set the status of the existing trials once a new champion is found."""
assert new_champion_trial is not None
is_init_update = False
if self._champion_trial is None:
@@ -324,21 +367,20 @@ class OnlineTrialRunner:
trial.set_checked_under_current_champion(False)
self._champion_trial = new_champion_trial
self._all_new_challengers_added = False
- logger.info('Set the champion as %s', self._champion_trial.trial_id)
+ logger.info("Set the champion as %s", self._champion_trial.trial_id)
if not is_init_update:
self._champion_update_times += 1
# calling set_search_properties of searcher will trigger
# new challenger generation. we do not do this for init champion
# as this step is already done when first constructing the searcher
- self._searcher.set_search_properties(None, None,
- {self._searcher.CHAMPION_TRIAL_NAME: self._champion_trial}
- )
+ self._searcher.set_search_properties(
+ setting={self._searcher.CHAMPION_TRIAL_NAME: self._champion_trial}
+ )
else:
self._champion_update_times = 0
def get_trials(self) -> list:
- """Return the list of trials managed by this TrialRunner.
- """
+ """Return the list of trials managed by this TrialRunner."""
return self._trials
def add_trial(self, new_trial):
@@ -357,8 +399,12 @@ class OnlineTrialRunner:
if trial.trial_id == new_trial.trial_id:
trial.set_checked_under_current_champion(True)
return
- logger.info('adding trial at iter %s, %s %s', self._total_steps, new_trial.trial_id,
- len(self._trials))
+ logger.info(
+ "adding trial at iter %s, %s %s",
+ self._total_steps,
+ new_trial.trial_id,
+ len(self._trials),
+ )
self._trials.append(new_trial)
self._scheduler.on_trial_add(self, new_trial)
@@ -369,8 +415,11 @@ class OnlineTrialRunner:
if trial.status in [Trial.ERROR, Trial.TERMINATED]:
return
else:
- logger.info('Terminating trial %s, with trial result %s',
- trial.trial_id, trial.result)
+ logger.info(
+ "Terminating trial %s, with trial result %s",
+ trial.trial_id,
+ trial.result,
+ )
trial.set_status(Trial.TERMINATED)
# clean up model and result
trial.clean_up_model()
@@ -385,10 +434,15 @@ class OnlineTrialRunner:
if trial.status in [Trial.ERROR, Trial.TERMINATED]:
return
else:
- logger.info('Pausing trial %s, with trial loss_avg: %s, loss_cb: %s, loss_ucb: %s,\
- resource_lease: %s', trial.trial_id, trial.result.loss_avg,
- trial.result.loss_cb, trial.result.loss_avg + trial.result.loss_cb,
- trial.resource_lease)
+ logger.info(
+ "Pausing trial %s, with trial loss_avg: %s, loss_cb: %s, loss_ucb: %s,\
+ resource_lease: %s",
+ trial.trial_id,
+ trial.result.loss_avg,
+ trial.result.loss_cb,
+ trial.result.loss_avg + trial.result.loss_cb,
+ trial.resource_lease,
+ )
trial.set_status(Trial.PAUSED)
# clean up model and result if no model persistence
if self._no_model_persistence:
@@ -413,11 +467,15 @@ class OnlineTrialRunner:
A bool indicating whether a new champion is found
"""
if trial_to_test.result is not None and self._champion_trial.result is not None:
- if 'ucb' in self._champion_test_policy:
- return self._test_lcb_ucb(self._champion_trial, trial_to_test, self.WARMSTART_NUM)
- elif 'avg' in self._champion_test_policy:
- return self._test_avg_loss(self._champion_trial, trial_to_test, self.WARMSTART_NUM)
- elif 'martingale' in self._champion_test_policy:
+ if "ucb" in self._champion_test_policy:
+ return self._test_lcb_ucb(
+ self._champion_trial, trial_to_test, self.WARMSTART_NUM
+ )
+ elif "avg" in self._champion_test_policy:
+ return self._test_avg_loss(
+ self._champion_trial, trial_to_test, self.WARMSTART_NUM
+ )
+ elif "martingale" in self._champion_test_policy:
return self._test_martingale(self._champion_trial, trial_to_test)
else:
raise NotImplementedError
@@ -426,22 +484,38 @@ class OnlineTrialRunner:
@staticmethod
def _worse_than_champion_test(champion_trial, trial, warmstart_num=1) -> bool:
- """Test whether the input trial is worse than the champion_trial
- """
+ """Test whether the input trial is worse than the champion_trial"""
if trial.result is not None and trial.result.resource_used >= warmstart_num:
if trial.result.loss_lcb > champion_trial.result.loss_ucb:
- logger.info('=========trial %s is worse than champion %s=====',
- trial.trial_id, champion_trial.trial_id)
- logger.info('trial %s %s %s', trial.config, trial.result, trial.resource_lease)
- logger.info('trial loss_avg:%s, trial loss_cb %s', trial.result.loss_avg,
- trial.result.loss_cb)
- logger.info('champion loss_avg:%s, champion loss_cb %s', champion_trial.result.loss_avg,
- champion_trial.result.loss_cb)
- logger.info('champion %s', champion_trial.config)
- logger.info('trial loss_avg_recent:%s, trial loss_cb %s', trial.result.loss_avg_recent,
- trial.result.loss_cb)
- logger.info('champion loss_avg_recent:%s, champion loss_cb %s',
- champion_trial.result.loss_avg_recent, champion_trial.result.loss_cb)
+ logger.info(
+ "=========trial %s is worse than champion %s=====",
+ trial.trial_id,
+ champion_trial.trial_id,
+ )
+ logger.info(
+ "trial %s %s %s", trial.config, trial.result, trial.resource_lease
+ )
+ logger.info(
+ "trial loss_avg:%s, trial loss_cb %s",
+ trial.result.loss_avg,
+ trial.result.loss_cb,
+ )
+ logger.info(
+ "champion loss_avg:%s, champion loss_cb %s",
+ champion_trial.result.loss_avg,
+ champion_trial.result.loss_cb,
+ )
+ logger.info("champion %s", champion_trial.config)
+ logger.info(
+ "trial loss_avg_recent:%s, trial loss_cb %s",
+ trial.result.loss_avg_recent,
+ trial.result.loss_cb,
+ )
+ logger.info(
+ "champion loss_avg_recent:%s, champion loss_cb %s",
+ champion_trial.result.loss_avg_recent,
+ champion_trial.result.loss_cb,
+ )
return True
return False
@@ -452,18 +526,35 @@ class OnlineTrialRunner:
"""
assert trial.trial_id != champion_trial.trial_id
if trial.result.resource_used >= warmstart_num:
- if trial.result.loss_ucb < champion_trial.result.loss_lcb - champion_trial.result.loss_cb:
- logger.info('======new champion condition satisfied: using lcb vs ucb=====')
- logger.info('new champion trial %s %s %s',
- trial.trial_id, trial.result.resource_used, trial.resource_lease)
- logger.info('new champion trial loss_avg:%s, trial loss_cb %s',
- trial.result.loss_avg, trial.result.loss_cb)
- logger.info('old champion trial %s %s %s',
- champion_trial.trial_id, champion_trial.result.resource_used,
- champion_trial.resource_lease,)
- logger.info('old champion loss avg %s, loss cb %s',
- champion_trial.result.loss_avg,
- champion_trial.result.loss_cb)
+ if (
+ trial.result.loss_ucb
+ < champion_trial.result.loss_lcb - champion_trial.result.loss_cb
+ ):
+ logger.info(
+ "======new champion condition satisfied: using lcb vs ucb====="
+ )
+ logger.info(
+ "new champion trial %s %s %s",
+ trial.trial_id,
+ trial.result.resource_used,
+ trial.resource_lease,
+ )
+ logger.info(
+ "new champion trial loss_avg:%s, trial loss_cb %s",
+ trial.result.loss_avg,
+ trial.result.loss_cb,
+ )
+ logger.info(
+ "old champion trial %s %s %s",
+ champion_trial.trial_id,
+ champion_trial.result.resource_used,
+ champion_trial.resource_lease,
+ )
+ logger.info(
+ "old champion loss avg %s, loss cb %s",
+ champion_trial.result.loss_avg,
+ champion_trial.result.loss_cb,
+ )
return True
return False
@@ -475,13 +566,19 @@ class OnlineTrialRunner:
assert trial.trial_id != champion_trial.trial_id
if trial.result.resource_used >= warmstart_num:
if trial.result.loss_avg < champion_trial.result.loss_avg:
- logger.info('=====new champion condition satisfied using avg loss=====')
- logger.info('trial %s', trial.config)
- logger.info('trial loss_avg:%s, trial loss_cb %s',
- trial.result.loss_avg, trial.result.loss_cb)
- logger.info('champion loss_avg:%s, champion loss_cb %s',
- champion_trial.result.loss_avg, champion_trial.result.loss_cb)
- logger.info('champion %s', champion_trial.config)
+ logger.info("=====new champion condition satisfied using avg loss=====")
+ logger.info("trial %s", trial.config)
+ logger.info(
+ "trial loss_avg:%s, trial loss_cb %s",
+ trial.result.loss_avg,
+ trial.result.loss_cb,
+ )
+ logger.info(
+ "champion loss_avg:%s, champion loss_cb %s",
+ champion_trial.result.loss_avg,
+ champion_trial.result.loss_cb,
+ )
+ logger.info("champion %s", champion_trial.config)
return True
return False
diff --git a/flaml/searcher/blendsearch.py b/flaml/searcher/blendsearch.py
index ee6dc5340..e4e20130c 100644
--- a/flaml/searcher/blendsearch.py
+++ b/flaml/searcher/blendsearch.py
@@ -129,11 +129,12 @@ class BlendSearch(Searcher):
self._metric, self._mode = metric, mode
init_config = low_cost_partial_config or {}
if not init_config:
- logger.warning(
+ logger.info(
"No low-cost partial config given to the search algorithm. "
"For cost-frugal search, "
"consider providing low-cost values for cost-related hps via "
- "'low_cost_partial_config'."
+ "'low_cost_partial_config'. More info can be found at "
+ "https://github.com/microsoft/FLAML/wiki/About-%60low_cost_partial_config%60"
)
if evaluated_rewards and mode:
self._points_to_evaluate = []
@@ -228,6 +229,7 @@ class BlendSearch(Searcher):
metric: Optional[str] = None,
mode: Optional[str] = None,
config: Optional[Dict] = None,
+ setting: Optional[Dict] = None,
) -> bool:
metric_changed = mode_changed = False
if metric and self._metric != metric:
@@ -264,22 +266,22 @@ class BlendSearch(Searcher):
)
self._gs.space = self._ls.space
self._init_search()
- if config:
- # CFO doesn't need these settings
- if "time_budget_s" in config:
- self._time_budget_s = config["time_budget_s"] # budget from now
- now = time.time()
- self._time_used += now - self._start_time
- self._start_time = now
- self._set_deadline()
- if "metric_target" in config:
- self._metric_target = config.get("metric_target")
- if "num_samples" in config:
- self._num_samples = (
- config["num_samples"]
- + len(self._result)
- + len(self._trial_proposed_by)
- )
+ if setting:
+ # CFO doesn't need these settings
+ if "time_budget_s" in setting:
+ self._time_budget_s = setting["time_budget_s"] # budget from now
+ now = time.time()
+ self._time_used += now - self._start_time
+ self._start_time = now
+ self._set_deadline()
+ if "metric_target" in setting:
+ self._metric_target = setting.get("metric_target")
+ if "num_samples" in setting:
+ self._num_samples = (
+ setting["num_samples"]
+ + len(self._result)
+ + len(self._trial_proposed_by)
+ )
return True
def _set_deadline(self):
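`set_search_properties` now distinguishes the tuning space (`config`) from runtime settings (`setting`); callers that previously packed budget or target values into `config` pass them through the new keyword, as the automl.py and online trial-runner hunks above already do. A sketch (values assumed):

```python
# Runtime knobs travel through `setting`, not `config`.
search_algo.set_search_properties(
    metric=None,
    mode=None,
    setting={
        "time_budget_s": 60,         # budget measured from now
        "metric_target": best_loss,  # assumed defined
    },
)
```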
diff --git a/flaml/searcher/flow2.py b/flaml/searcher/flow2.py
index 7e3b49cd9..9057c6584 100644
--- a/flaml/searcher/flow2.py
+++ b/flaml/searcher/flow2.py
@@ -1,14 +1,16 @@
-'''!
- * Copyright (c) 2020-2021 Microsoft Corporation. All rights reserved.
+"""!
+ * Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the
* project root for license information.
-'''
+"""
from flaml.tune.sample import Domain
from typing import Dict, Optional, Tuple
import numpy as np
+
try:
from ray import __version__ as ray_version
- assert ray_version >= '1.0.0'
+
+ assert ray_version >= "1.0.0"
from ray.tune.suggest import Searcher
from ray.tune.suggest.variant_generator import generate_variants
from ray.tune import sample
@@ -22,28 +24,30 @@ from ..tune.space import complete_config, denormalize, normalize
import logging
+
logger = logging.getLogger(__name__)
class FLOW2(Searcher):
- '''Local search algorithm FLOW2, with adaptive step size
- '''
+ """Local search algorithm FLOW2, with adaptive step size"""
STEPSIZE = 0.1
STEP_LOWER_BOUND = 0.0001
- def __init__(self,
- init_config: dict,
- metric: Optional[str] = None,
- mode: Optional[str] = None,
- space: Optional[dict] = None,
- prune_attr: Optional[str] = None,
- min_resource: Optional[float] = None,
- max_resource: Optional[float] = None,
- resource_multiple_factor: Optional[float] = 4,
- cost_attr: Optional[str] = 'time_total_s',
- seed: Optional[int] = 20):
- '''Constructor
+ def __init__(
+ self,
+ init_config: dict,
+ metric: Optional[str] = None,
+ mode: Optional[str] = None,
+ space: Optional[dict] = None,
+ prune_attr: Optional[str] = None,
+ min_resource: Optional[float] = None,
+ max_resource: Optional[float] = None,
+ resource_multiple_factor: Optional[float] = 4,
+ cost_attr: Optional[str] = "time_total_s",
+ seed: Optional[int] = 20,
+ ):
+ """Constructor
Args:
init_config: a dictionary of a partial or full initial config,
@@ -79,20 +83,18 @@ class FLOW2(Searcher):
used for increasing resource.
cost_attr: A string of the attribute used for cost.
seed: An integer of the random seed.
- '''
+ """
if mode:
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
else:
mode = "min"
- super(FLOW2, self).__init__(
- metric=metric,
- mode=mode)
+ super(FLOW2, self).__init__(metric=metric, mode=mode)
# internally minimizes, so "max" => -1
if mode == "max":
- self.metric_op = -1.
+ self.metric_op = -1.0
elif mode == "min":
- self.metric_op = 1.
+ self.metric_op = 1.0
self.space = space or {}
self._space = flatten_dict(self.space, prevent_delimiter=True)
self._random = np.random.RandomState(seed)
@@ -106,7 +108,7 @@ class FLOW2(Searcher):
self.max_resource = max_resource
self._resource = None
self._step_lb = np.Inf
- if space:
+ if space is not None:
self._init_search()
def _init_search(self):
@@ -115,9 +117,10 @@ class FLOW2(Searcher):
self._unordered_cat_hp = {}
hier = False
for key, domain in self._space.items():
- assert not (isinstance(domain, dict) and 'grid_search' in domain), \
- f"{key}'s domain is grid search, not supported in FLOW^2."
- if callable(getattr(domain, 'get_sampler', None)):
+ assert not (
+ isinstance(domain, dict) and "grid_search" in domain
+ ), f"{key}'s domain is grid search, not supported in FLOW^2."
+ if callable(getattr(domain, "get_sampler", None)):
self._tunable_keys.append(key)
sampler = domain.get_sampler()
# the step size lower bound for uniform variables doesn't depend
@@ -125,12 +128,14 @@ class FLOW2(Searcher):
if isinstance(sampler, sample.Quantized):
q = sampler.q
sampler = sampler.get_sampler()
- if str(sampler) == 'Uniform':
+ if str(sampler) == "Uniform":
self._step_lb = min(
- self._step_lb, q / (domain.upper - domain.lower))
- elif isinstance(domain, sample.Integer) and str(sampler) == 'Uniform':
+ self._step_lb, q / (domain.upper - domain.lower)
+ )
+ elif isinstance(domain, sample.Integer) and str(sampler) == "Uniform":
self._step_lb = min(
- self._step_lb, 1.0 / (domain.upper - 1 - domain.lower))
+ self._step_lb, 1.0 / (domain.upper - 1 - domain.lower)
+ )
if isinstance(domain, sample.Categorical):
if not domain.ordered:
self._unordered_cat_hp[key] = len(domain.categories)
@@ -139,13 +144,12 @@ class FLOW2(Searcher):
if isinstance(cat, dict):
hier = True
break
- if str(sampler) != 'Normal':
+ if str(sampler) != "Normal":
self._bounded_keys.append(key)
if not hier:
self._space_keys = sorted(self._tunable_keys)
self.hierarchical = hier
- if (self.prune_attr and self.prune_attr not in self._space
- and self.max_resource):
+ if self.prune_attr and self.prune_attr not in self._space and self.max_resource:
self.min_resource = self.min_resource or self._min_resource()
self._resource = self._round(self.min_resource)
if not hier:
@@ -169,10 +173,11 @@ class FLOW2(Searcher):
if self.step > self.step_ub:
self.step = self.step_ub
# maximal # consecutive no improvements
- self.dir = 2**(min(9, self.dim))
+ self.dir = 2 ** (min(9, self.dim))
self._configs = {} # dict from trial_id to (config, stepsize)
self._K = 0
- self._iter_best_config = self.trial_count_proposed = self.trial_count_complete = 1
+ self._iter_best_config = 1
+ self.trial_count_proposed = self.trial_count_complete = 1
self._num_proposedby_incumbent = 0
self._reset_times = 0
# record intermediate trial cost
@@ -196,14 +201,18 @@ class FLOW2(Searcher):
if isinstance(sampler, sample.Quantized):
q = sampler.q
sampler_inner = sampler.get_sampler()
- if str(sampler_inner) == 'LogUniform':
+ if str(sampler_inner) == "LogUniform":
step_lb = min(
- step_lb, np.log(1.0 + q / self.best_config[key])
- / np.log(domain.upper / domain.lower))
- elif isinstance(domain, sample.Integer) and str(sampler) == 'LogUniform':
+ step_lb,
+ np.log(1.0 + q / self.best_config[key])
+ / np.log(domain.upper / domain.lower),
+ )
+ elif isinstance(domain, sample.Integer) and str(sampler) == "LogUniform":
step_lb = min(
- step_lb, np.log(1.0 + 1.0 / self.best_config[key])
- / np.log((domain.upper - 1) / domain.lower))
+ step_lb,
+ np.log(1.0 + 1.0 / self.best_config[key])
+ / np.log((domain.upper - 1) / domain.lower),
+ )
if np.isinf(step_lb):
step_lb = self.STEP_LOWER_BOUND
else:
@@ -215,13 +224,11 @@ class FLOW2(Searcher):
return self._resource
def _min_resource(self) -> float:
- ''' automatically decide minimal resource
- '''
+ """automatically decide minimal resource"""
return self.max_resource / np.power(self.resource_multiple_factor, 5)
def _round(self, resource) -> float:
- ''' round the resource to self.max_resource if close to it
- '''
+ """round the resource to self.max_resource if close to it"""
if resource * self.resource_multiple_factor > self.max_resource:
return self.max_resource
return resource
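For concreteness: with the default `resource_multiple_factor=4`, `_min_resource` is `max_resource / 4**5`, and `_round` snaps a resource up to `max_resource` once one more multiplication would overshoot it. A standalone numeric sketch of both rules:

```python
import numpy as np

max_resource = 10000.0
factor = 4

# _min_resource: five multiplicative steps below the maximum
min_resource = max_resource / np.power(factor, 5)  # 10000 / 1024 ≈ 9.77

# _round: snap to max_resource when one more multiplication overshoots
def round_resource(resource):
    if resource * factor > max_resource:
        return max_resource
    return resource

print(round_resource(3000.0))  # 10000.0 (since 3000 * 4 > 10000)
print(round_resource(2000.0))  # 2000.0
```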
@@ -231,70 +238,83 @@ class FLOW2(Searcher):
return vec
def complete_config(
- self, partial_config: Dict,
- lower: Optional[Dict] = None, upper: Optional[Dict] = None
+ self,
+ partial_config: Dict,
+ lower: Optional[Dict] = None,
+ upper: Optional[Dict] = None,
) -> Tuple[Dict, Dict]:
- ''' generate a complete config from the partial config input
+ """generate a complete config from the partial config input
add minimal resource to config if available
- '''
+ """
disturb = self._reset_times and partial_config == self.init_config
# if not the first time to complete init_config, use random gaussian
config, space = complete_config(
- partial_config, self.space, self, disturb, lower, upper)
+ partial_config, self.space, self, disturb, lower, upper
+ )
if partial_config == self.init_config:
self._reset_times += 1
if self._resource:
config[self.prune_attr] = self.min_resource
return config, space
- def create(self, init_config: Dict, obj: float, cost: float, space: Dict
- ) -> Searcher:
+ def create(
+ self, init_config: Dict, obj: float, cost: float, space: Dict
+ ) -> Searcher:
# space is the subspace where the init_config is located
flow2 = self.__class__(
- init_config, self.metric, self.mode,
- space, self.prune_attr,
- self.min_resource, self.max_resource,
- self.resource_multiple_factor, self.cost_attr, self.seed + 1)
+ init_config,
+ self.metric,
+ self.mode,
+ space,
+ self.prune_attr,
+ self.min_resource,
+ self.max_resource,
+ self.resource_multiple_factor,
+ self.cost_attr,
+ self.seed + 1,
+ )
flow2.best_obj = obj * self.metric_op # minimize internally
flow2.cost_incumbent = cost
self.seed += 1
return flow2
def normalize(self, config, recursive=False) -> Dict:
- ''' normalize each dimension in config to [0,1]
- '''
+ """normalize each dimension in config to [0,1]"""
return normalize(
- config, self._space, self.best_config, self.incumbent, recursive)
+ config, self._space, self.best_config, self.incumbent, recursive
+ )
def denormalize(self, config):
- ''' denormalize each dimension in config from [0,1]
- '''
+ """denormalize each dimension in config from [0,1]"""
return denormalize(
- config, self._space, self.best_config, self.incumbent, self._random)
+ config, self._space, self.best_config, self.incumbent, self._random
+ )
- def set_search_properties(self,
- metric: Optional[str] = None,
- mode: Optional[str] = None,
- config: Optional[Dict] = None) -> bool:
+ def set_search_properties(
+ self,
+ metric: Optional[str] = None,
+ mode: Optional[str] = None,
+ config: Optional[Dict] = None,
+ ) -> bool:
if metric:
self._metric = metric
if mode:
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
self._mode = mode
if mode == "max":
- self.metric_op = -1.
+ self.metric_op = -1.0
elif mode == "min":
- self.metric_op = 1.
+ self.metric_op = 1.0
if config:
self.space = config
self._space = flatten_dict(self.space)
self._init_search()
return True
- def on_trial_complete(self, trial_id: str, result: Optional[Dict] = None,
- error: bool = False):
- ''' compare with incumbent
- '''
+ def on_trial_complete(
+ self, trial_id: str, result: Optional[Dict] = None, error: bool = False
+ ):
+ # compare with incumbent
# if better, move, reset num_complete and num_proposed
# if not better and num_complete >= 2*dim, num_allowed += 2
self.trial_count_complete += 1
@@ -329,15 +349,19 @@ class FLOW2(Searcher):
if proposed_by == self.incumbent:
# proposed by current incumbent and no better
self._num_complete4incumbent += 1
- cost = result.get(
- self.cost_attr) if result else self._trial_cost.get(trial_id)
+ cost = (
+ result.get(self.cost_attr) if result else self._trial_cost.get(trial_id)
+ )
if cost:
self._cost_complete4incumbent += cost
- if self._num_complete4incumbent >= 2 * self.dim and \
- self._num_allowed4incumbent == 0:
+ if (
+ self._num_complete4incumbent >= 2 * self.dim
+ and self._num_allowed4incumbent == 0
+ ):
self._num_allowed4incumbent = 2
if self._num_complete4incumbent == self.dir and (
- not self._resource or self._resource == self.max_resource):
+ not self._resource or self._resource == self.max_resource
+ ):
# check stuck condition if using max resource
self._num_complete4incumbent -= 2
if self._num_allowed4incumbent < 2:
@@ -345,8 +369,7 @@ class FLOW2(Searcher):
# elif proposed_by: del self._proposed_by[trial_id]
def on_trial_result(self, trial_id: str, result: Dict):
- ''' early update of incumbent
- '''
+ """early update of incumbent"""
if result:
obj = result.get(self._metric)
if obj:
@@ -373,27 +396,32 @@ class FLOW2(Searcher):
def rand_vector_unit_sphere(self, dim, trunc=0) -> np.ndarray:
vec = self._random.normal(0, 1, dim)
if 0 < trunc < dim:
- vec[np.abs(vec).argsort()[:dim - trunc]] = 0
+ vec[np.abs(vec).argsort()[: dim - trunc]] = 0
mag = np.linalg.norm(vec)
return vec / mag
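`rand_vector_unit_sphere` draws a Gaussian vector, optionally zeroes all but the `trunc` largest-magnitude coordinates, and normalizes the result to unit length, so each proposal moves along a random (possibly sparse) direction. A self-contained sketch of the same logic:

```python
import numpy as np

rng = np.random.RandomState(20)

def rand_vector_unit_sphere(dim, trunc=0):
    vec = rng.normal(0, 1, dim)
    if 0 < trunc < dim:
        # zero out all but the `trunc` largest-magnitude entries
        vec[np.abs(vec).argsort()[: dim - trunc]] = 0
    return vec / np.linalg.norm(vec)

v = rand_vector_unit_sphere(5, trunc=2)
print(np.count_nonzero(v), round(np.linalg.norm(v), 6))  # 2 1.0
```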
def suggest(self, trial_id: str) -> Optional[Dict]:
- ''' suggest a new config, one of the following cases:
+ """suggest a new config, one of the following cases:
1. same incumbent, increase resource
2. same resource, move from the incumbent to a random direction
3. same resource, move from the incumbent to the opposite direction
#TODO: better decouple FLOW2 config suggestion and stepsize update
- '''
+ """
self.trial_count_proposed += 1
- if self._num_complete4incumbent > 0 and self.cost_incumbent and \
- self._resource and self._resource < self.max_resource and (
+ if (
+ self._num_complete4incumbent > 0
+ and self.cost_incumbent
+ and self._resource
+ and self._resource < self.max_resource
+ and (
self._cost_complete4incumbent
- >= self.cost_incumbent * self.resource_multiple_factor):
+ >= self.cost_incumbent * self.resource_multiple_factor
+ )
+ ):
# consider increasing resource using sum eval cost of complete
# configs
old_resource = self._resource
- self._resource = self._round(
- self._resource * self.resource_multiple_factor)
+ self._resource = self._round(self._resource * self.resource_multiple_factor)
self.cost_incumbent *= self._resource / old_resource
config = self.best_config.copy()
config[self.prune_attr] = self._resource
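Numerically: once the summed cost of completed configs reaches `resource_multiple_factor` times the incumbent's cost, the resource is multiplied by that factor (and rounded) and the incumbent's cost estimate is scaled by the same ratio. A worked sketch with illustrative numbers:

```python
factor = 4
max_resource = 10000.0
resource, cost_incumbent = 625.0, 2.0

def _round(r):
    # snap to max_resource when one more multiplication would overshoot
    return max_resource if r * factor > max_resource else r

old_resource = resource
resource = _round(resource * factor)       # 625 * 4 = 2500.0
cost_incumbent *= resource / old_resource  # 2.0 * 4 = 8.0
print(resource, cost_incumbent)            # 2500.0 8.0
```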
@@ -409,8 +437,9 @@ class FLOW2(Searcher):
self._direction_tried = None
else:
# propose a new direction
- self._direction_tried = self.rand_vector_unit_sphere(
- self.dim, self._trunc) * self.step
+ self._direction_tried = (
+ self.rand_vector_unit_sphere(self.dim, self._trunc) * self.step
+ )
for i, key in enumerate(self._tunable_keys):
move[key] += self._direction_tried[i]
self._project(move)
@@ -442,7 +471,8 @@ class FLOW2(Searcher):
break
self._same = same
if self._num_proposedby_incumbent == self.dir and (
- not self._resource or self._resource == self.max_resource):
+ not self._resource or self._resource == self.max_resource
+ ):
# check stuck condition if using max resource
self._num_proposedby_incumbent -= 2
self._init_phase = False
@@ -459,11 +489,11 @@ class FLOW2(Searcher):
# random
for i, key in enumerate(self._tunable_keys):
if self._direction_tried[i] != 0:
- for _, generated in generate_variants({'config': {
- key: self._space[key]
- }}):
- if generated['config'][key] != best_config[key]:
- config[key] = generated['config'][key]
+ for _, generated in generate_variants(
+ {"config": {key: self._space[key]}}
+ ):
+ if generated["config"][key] != best_config[key]:
+ config[key] = generated["config"][key]
return unflatten_dict(config)
break
else:
@@ -477,8 +507,7 @@ class FLOW2(Searcher):
return unflatten_dict(config)
def _project(self, config):
- ''' project normalized config in the feasible region and set prune_attr
- '''
+ """project normalized config in the feasible region and set prune_attr"""
for key in self._bounded_keys:
value = config[key]
config[key] = max(0, min(1, value))
@@ -487,14 +516,13 @@ class FLOW2(Searcher):
@property
def can_suggest(self) -> bool:
- ''' can't suggest if 2*dim configs have been proposed for the incumbent
- while fewer are completed
- '''
+ """can't suggest if 2*dim configs have been proposed for the incumbent
+ while fewer are completed
+ """
return self._num_allowed4incumbent > 0
def config_signature(self, config, space: Dict = None) -> tuple:
- ''' return the signature tuple of a config
- '''
+ """return the signature tuple of a config"""
config = flatten_dict(config)
if space:
space = flatten_dict(space)
@@ -514,8 +542,11 @@ class FLOW2(Searcher):
if self.hierarchical:
# can't remove constant for hierarchical search space,
# e.g., learner
- if not (domain is None or type(domain) in (str, int, float)
- or isinstance(domain, sample.Domain)):
+ if not (
+ domain is None
+ or type(domain) in (str, int, float)
+ or isinstance(domain, sample.Domain)
+ ):
# not domain or hashable
# get rid of list type for hierarchical search space.
continue
@@ -527,16 +558,14 @@ class FLOW2(Searcher):
@property
def converged(self) -> bool:
- ''' return whether the local search has converged
- '''
+ """return whether the local search has converged"""
if self._num_complete4incumbent < self.dir - 2:
return False
# check stepsize after enough configs are completed
return self.step < self.step_lower_bound
def reach(self, other: Searcher) -> bool:
- ''' whether the incumbent can reach the incumbent of other
- '''
+ """whether the incumbent can reach the incumbent of other"""
config1, config2 = self.best_config, other.best_config
incumbent1, incumbent2 = self.incumbent, other.incumbent
if self._resource and config1[self.prune_attr] > config2[self.prune_attr]:
@@ -547,6 +576,9 @@ class FLOW2(Searcher):
if config1[key] != config2.get(key):
return False
delta = np.array(
- [incumbent1[key] - incumbent2.get(key, np.inf)
- for key in self._tunable_keys])
+ [
+ incumbent1[key] - incumbent2.get(key, np.inf)
+ for key in self._tunable_keys
+ ]
+ )
return np.linalg.norm(delta) <= self.step
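`reach` treats another thread's incumbent as reachable when the constant keys agree and the two incumbents lie within one step of each other in the normalized space. A toy check of the distance test (values are illustrative normalized coordinates):

```python
import numpy as np

step = 0.1
incumbent1 = {"x": 0.50, "y": 0.30}
incumbent2 = {"x": 0.55, "y": 0.32}
delta = np.array(
    [incumbent1[k] - incumbent2.get(k, np.inf) for k in ("x", "y")]
)
print(np.linalg.norm(delta) <= step)  # True: distance ≈ 0.054
```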
diff --git a/flaml/searcher/online_searcher.py b/flaml/searcher/online_searcher.py
index 836717ab0..e90f68281 100644
--- a/flaml/searcher/online_searcher.py
+++ b/flaml/searcher/online_searcher.py
@@ -20,14 +20,19 @@ class BaseSearcher:
on_trial_complete()
"""
- def __init__(self,
- metric: Optional[str] = None,
- mode: Optional[str] = None,
- ):
+ def __init__(
+ self,
+ metric: Optional[str] = None,
+ mode: Optional[str] = None,
+ ):
pass
- def set_search_properties(self, metric: Optional[str] = None, mode: Optional[str] = None,
- config: Optional[Dict] = None):
+ def set_search_properties(
+ self,
+ metric: Optional[str] = None,
+ mode: Optional[str] = None,
+ config: Optional[Dict] = None,
+ ):
if metric:
self._metric = metric
if mode:
@@ -66,6 +71,7 @@ class ChampionFrontierSearcher(BaseSearcher):
(although not the same searcher_trial_id).
searcher_trial_id will be used in suggest()
"""
+
# ****the following constants are used when generating new challengers in
# the _query_config_oracle function
    # how many items to add when doing the expansion
@@ -84,25 +90,26 @@ class ChampionFrontierSearcher(BaseSearcher):
# 0.95 of the previous best config's loss.
    # NOTE: this setting depends on the assumption that set_search_properties (and thus
# _query_config_oracle) is only triggered when a better champion is found.
- CFO_SEARCHER_METRIC_NAME = 'pseudo_loss'
+ CFO_SEARCHER_METRIC_NAME = "pseudo_loss"
CFO_SEARCHER_LARGE_LOSS = 1e6
    # the random seed used in generating numerical hyperparameter configs (when CFO is not used)
NUM_RANDOM_SEED = 111
- CHAMPION_TRIAL_NAME = 'champion_trial'
+ CHAMPION_TRIAL_NAME = "champion_trial"
TRIAL_CLASS = VowpalWabbitTrial
- def __init__(self,
- init_config: Dict,
- space: Optional[Dict] = None,
- metric: Optional[str] = None,
- mode: Optional[str] = None,
- random_seed: Optional[int] = 2345,
- online_trial_args: Optional[Dict] = {},
- nonpoly_searcher_name: Optional[str] = 'CFO'
- ):
- '''Constructor
+ def __init__(
+ self,
+ init_config: Dict,
+ space: Optional[Dict] = None,
+ metric: Optional[str] = None,
+ mode: Optional[str] = None,
+ random_seed: Optional[int] = 2345,
+ online_trial_args: Optional[Dict] = {},
+ nonpoly_searcher_name: Optional[str] = "CFO",
+ ):
+ """Constructor
Args:
init_config: dict
@@ -113,7 +120,7 @@ class ChampionFrontierSearcher(BaseSearcher):
online_trial_args: dict
nonpoly_searcher_name: A string to specify the search algorithm
for nonpoly hyperparameters
- '''
+ """
self._init_config = init_config
self._space = space
self._seed = random_seed
@@ -122,44 +129,62 @@ class ChampionFrontierSearcher(BaseSearcher):
self._random_state = np.random.RandomState(self._seed)
self._searcher_for_nonpoly_hp = {}
- self._space_of_nonpoly_hp = {}
+
# dicts to remember the mapping between searcher_trial_id and trial_id
- self._searcher_trialid_to_trialid = {} # key: searcher_trial_id, value: trial_id
- self._trialid_to_searcher_trial_id = {} # value: trial_id, key: searcher_trial_id
+ self._space_of_nonpoly_hp = {}
+
+ # key: searcher_trial_id, value: trial_id
+ self._searcher_trialid_to_trialid = {}
+
+ # value: trial_id, key: searcher_trial_id
+ self._trialid_to_searcher_trial_id = {}
+
self._challenger_list = []
# initialize the search in set_search_properties
- self.set_search_properties(config={self.CHAMPION_TRIAL_NAME: None}, init_call=True)
- logger.debug('using random seed %s in config oracle', self._seed)
+ self.set_search_properties(
+ setting={self.CHAMPION_TRIAL_NAME: None}, init_call=True
+ )
+ logger.debug("using random seed %s in config oracle", self._seed)
- def set_search_properties(self, metric: Optional[str] = None,
- mode: Optional[str] = None,
- config: Optional[Dict] = {},
- init_call: Optional[bool] = False):
- """Construct search space with given config, and setup the search
- """
+ def set_search_properties(
+ self,
+ metric: Optional[str] = None,
+ mode: Optional[str] = None,
+ config: Optional[Dict] = {},
+ setting: Optional[Dict] = {},
+ init_call: Optional[bool] = False,
+ ):
+ """Construct search space with given config, and setup the search"""
super().set_search_properties(metric, mode, config)
        # ********* Use ConfigOracle (i.e., self._generate_new_space) to generate a list of new challengers
- logger.info('champion trial %s', config)
- champion_trial = config.get(self.CHAMPION_TRIAL_NAME, None)
+ logger.info("setting %s", setting)
+ champion_trial = setting.get(self.CHAMPION_TRIAL_NAME, None)
if champion_trial is None:
champion_trial = self._create_trial_from_config(self._init_config)
# generate a new list of challenger trials
- new_challenger_list = self._query_config_oracle(champion_trial.config,
- champion_trial.trial_id,
- self._trialid_to_searcher_trial_id[champion_trial.trial_id])
+ new_challenger_list = self._query_config_oracle(
+ champion_trial.config,
+ champion_trial.trial_id,
+ self._trialid_to_searcher_trial_id[champion_trial.trial_id],
+ )
# add the newly generated challengers to existing challengers
# there can be duplicates and we check duplicates when calling next_trial()
self._challenger_list = self._challenger_list + new_challenger_list
# add the champion as part of the new_challenger_list when called initially
if init_call:
self._challenger_list.append(champion_trial)
- logger.critical('Created challengers from champion %s', champion_trial.trial_id)
- logger.critical('New challenger size %s, %s', len(self._challenger_list),
- [t.trial_id for t in self._challenger_list])
+ logger.info(
+ "**Important** Created challengers from champion %s",
+ champion_trial.trial_id,
+ )
+ logger.info(
+ "New challenger size %s, %s",
+ len(self._challenger_list),
+ [t.trial_id for t in self._challenger_list],
+ )
def next_trial(self):
- """Return a trial from the _challenger_list
- """
+ """Return a trial from the _challenger_list"""
next_trial = None
if self._challenger_list:
next_trial = self._challenger_list.pop()
@@ -175,8 +200,9 @@ class ChampionFrontierSearcher(BaseSearcher):
self._trialid_to_searcher_trial_id[trial.trial_id] = searcher_trial_id
return trial
- def _query_config_oracle(self, seed_config, seed_config_trial_id,
- seed_config_searcher_trial_id=None) -> List[Trial]:
+ def _query_config_oracle(
+ self, seed_config, seed_config_trial_id, seed_config_searcher_trial_id=None
+ ) -> List[Trial]:
"""Give the seed config, generate a list of new configs (which are supposed to include
at least one config that has better performance than the input seed_config)
"""
@@ -189,12 +215,16 @@ class ChampionFrontierSearcher(BaseSearcher):
config_domain = self._space[k]
if isinstance(config_domain, PolynomialExpansionSet):
# get candidate configs for hyperparameters of the PolynomialExpansionSet type
- partial_new_configs = self._generate_independent_hp_configs(k, v, config_domain)
+ partial_new_configs = self._generate_independent_hp_configs(
+ k, v, config_domain
+ )
if partial_new_configs:
hyperparameter_config_groups.append(partial_new_configs)
# does not have searcher_trial_ids
searcher_trial_ids_groups.append([])
- elif isinstance(config_domain, Float) or isinstance(config_domain, Categorical):
+ elif isinstance(config_domain, Float) or isinstance(
+ config_domain, Categorical
+ ):
# otherwise we need to deal with them in group
nonpoly_config[k] = v
if k not in self._space_of_nonpoly_hp:
@@ -204,38 +234,57 @@ class ChampionFrontierSearcher(BaseSearcher):
if nonpoly_config:
new_searcher_trial_ids = []
partial_new_nonpoly_configs = []
- if 'CFO' in self._nonpoly_searcher_name:
+ if "CFO" in self._nonpoly_searcher_name:
if seed_config_trial_id not in self._searcher_for_nonpoly_hp:
- self._searcher_for_nonpoly_hp[seed_config_trial_id] = CFO(space=self._space_of_nonpoly_hp,
- points_to_evaluate=[nonpoly_config],
- metric=self.CFO_SEARCHER_METRIC_NAME,
- )
+ self._searcher_for_nonpoly_hp[seed_config_trial_id] = CFO(
+ space=self._space_of_nonpoly_hp,
+ points_to_evaluate=[nonpoly_config],
+ metric=self.CFO_SEARCHER_METRIC_NAME,
+ )
# initialize the search in set_search_properties
- self._searcher_for_nonpoly_hp[seed_config_trial_id].set_search_properties(
- config={'metric_target': self.CFO_SEARCHER_LARGE_LOSS})
+ self._searcher_for_nonpoly_hp[
+ seed_config_trial_id
+ ].set_search_properties(
+ setting={"metric_target": self.CFO_SEARCHER_LARGE_LOSS}
+ )
                # We need to call this once, so that the seed config in
                # points_to_evaluate will be tried
- self._searcher_for_nonpoly_hp[seed_config_trial_id].suggest(seed_config_searcher_trial_id)
+ self._searcher_for_nonpoly_hp[seed_config_trial_id].suggest(
+ seed_config_searcher_trial_id
+ )
# assuming minimization
- if self._searcher_for_nonpoly_hp[seed_config_trial_id].metric_target is None:
+ if (
+ self._searcher_for_nonpoly_hp[seed_config_trial_id].metric_target
+ is None
+ ):
pseudo_loss = self.CFO_SEARCHER_LARGE_LOSS
else:
- pseudo_loss = self._searcher_for_nonpoly_hp[seed_config_trial_id].metric_target * 0.95
+ pseudo_loss = (
+ self._searcher_for_nonpoly_hp[
+ seed_config_trial_id
+ ].metric_target
+ * 0.95
+ )
pseudo_result_to_report = {}
for k, v in nonpoly_config.items():
- pseudo_result_to_report['config/' + str(k)] = v
+ pseudo_result_to_report["config/" + str(k)] = v
pseudo_result_to_report[self.CFO_SEARCHER_METRIC_NAME] = pseudo_loss
- pseudo_result_to_report['time_total_s'] = 1
- self._searcher_for_nonpoly_hp[seed_config_trial_id].on_trial_complete(seed_config_searcher_trial_id,
- result=pseudo_result_to_report)
+ pseudo_result_to_report["time_total_s"] = 1
+ self._searcher_for_nonpoly_hp[seed_config_trial_id].on_trial_complete(
+ seed_config_searcher_trial_id, result=pseudo_result_to_report
+ )
while len(partial_new_nonpoly_configs) < self.NUMERICAL_NUM:
# suggest multiple times
new_searcher_trial_id = Trial.generate_id()
new_searcher_trial_ids.append(new_searcher_trial_id)
- suggestion = self._searcher_for_nonpoly_hp[seed_config_trial_id].suggest(new_searcher_trial_id)
+ suggestion = self._searcher_for_nonpoly_hp[
+ seed_config_trial_id
+ ].suggest(new_searcher_trial_id)
if suggestion is not None:
partial_new_nonpoly_configs.append(suggestion)
- logger.info('partial_new_nonpoly_configs %s', partial_new_nonpoly_configs)
+ logger.info(
+ "partial_new_nonpoly_configs %s", partial_new_nonpoly_configs
+ )
else:
raise NotImplementedError
if partial_new_nonpoly_configs:
@@ -244,9 +293,11 @@ class ChampionFrontierSearcher(BaseSearcher):
# ----------- coordinate generation of new challengers in the case of multiple groups
new_trials = []
for i in range(len(hyperparameter_config_groups)):
- logger.info('hyperparameter_config_groups[i] %s %s',
- len(hyperparameter_config_groups[i]),
- hyperparameter_config_groups[i])
+ logger.info(
+ "hyperparameter_config_groups[i] %s %s",
+ len(hyperparameter_config_groups[i]),
+ hyperparameter_config_groups[i],
+ )
for j, new_partial_config in enumerate(hyperparameter_config_groups[i]):
new_seed_config = seed_config.copy()
new_seed_config.update(new_partial_config)
@@ -260,32 +311,55 @@ class ChampionFrontierSearcher(BaseSearcher):
new_searcher_trial_id = searcher_trial_ids_groups[i][j]
else:
new_searcher_trial_id = None
- new_trial = self._create_trial_from_config(new_seed_config, new_searcher_trial_id)
+ new_trial = self._create_trial_from_config(
+ new_seed_config, new_searcher_trial_id
+ )
new_trials.append(new_trial)
- logger.info('new_configs %s', [t.trial_id for t in new_trials])
+ logger.info("new_configs %s", [t.trial_id for t in new_trials])
return new_trials
- def _generate_independent_hp_configs(self, hp_name, current_config_value, config_domain) -> List:
+ def _generate_independent_hp_configs(
+ self, hp_name, current_config_value, config_domain
+ ) -> List:
if isinstance(config_domain, PolynomialExpansionSet):
- seed_interactions = list(current_config_value) + list(config_domain.init_monomials)
- logger.critical('Seed namespaces (singletons and interactions): %s', seed_interactions)
- logger.info('current_config_value %s %s', current_config_value, seed_interactions)
- configs = self._generate_poly_expansion_sets(seed_interactions,
- self.EXPANSION_ORDER,
- config_domain.allow_self_inter,
- config_domain.highest_poly_order,
- self.POLY_EXPANSION_ADDITION_NUM,
- )
+ seed_interactions = list(current_config_value) + list(
+ config_domain.init_monomials
+ )
+ logger.info(
+ "**Important** Seed namespaces (singletons and interactions): %s",
+ seed_interactions,
+ )
+ logger.info("current_config_value %s", current_config_value)
+ configs = self._generate_poly_expansion_sets(
+ seed_interactions,
+ self.EXPANSION_ORDER,
+ config_domain.allow_self_inter,
+ config_domain.highest_poly_order,
+ self.POLY_EXPANSION_ADDITION_NUM,
+ )
else:
raise NotImplementedError
configs_w_key = [{hp_name: hp_config} for hp_config in configs]
return configs_w_key
- def _generate_poly_expansion_sets(self, seed_interactions, order, allow_self_inter,
- highest_poly_order, interaction_num_to_add):
- champion_all_combinations = self._generate_all_comb(seed_interactions, order, allow_self_inter, highest_poly_order)
- space = sorted(list(itertools.combinations(
- champion_all_combinations, interaction_num_to_add)))
+ def _generate_poly_expansion_sets(
+ self,
+ seed_interactions,
+ order,
+ allow_self_inter,
+ highest_poly_order,
+ interaction_num_to_add,
+ ):
+ champion_all_combinations = self._generate_all_comb(
+ seed_interactions, order, allow_self_inter, highest_poly_order
+ )
+ space = sorted(
+ list(
+ itertools.combinations(
+ champion_all_combinations, interaction_num_to_add
+ )
+ )
+ )
self._random_state.shuffle(space)
candidate_configs = [set(seed_interactions) | set(item) for item in space]
final_candidate_configs = []
@@ -295,9 +369,12 @@ class ChampionFrontierSearcher(BaseSearcher):
return final_candidate_configs
@staticmethod
- def _generate_all_comb(seed_interactions: list, seed_interaction_order: int,
- allow_self_inter: Optional[bool] = False,
- highest_poly_order: Optional[int] = None):
+ def _generate_all_comb(
+ seed_interactions: list,
+ seed_interaction_order: int,
+ allow_self_inter: Optional[bool] = False,
+ highest_poly_order: Optional[int] = None,
+ ):
"""Generate new interactions by doing up to seed_interaction_order on the seed_interactions
Args:
@@ -312,8 +389,7 @@ class ChampionFrontierSearcher(BaseSearcher):
"""
def get_interactions(list1, list2):
- """Get combinatorial list of tuples
- """
+ """Get combinatorial list of tuples"""
new_list = []
for i in list1:
for j in list2:
@@ -321,19 +397,18 @@ class ChampionFrontierSearcher(BaseSearcher):
# 'abc' 'cba' 'bca' are all 'abc'
# this is done to ensure we can use the config as the signature
# of the trial, i.e., trial id.
- new_interaction = ''.join(sorted(i + j))
+ new_interaction = "".join(sorted(i + j))
if new_interaction not in new_list:
new_list.append(new_interaction)
return new_list
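Because each pair is canonicalized by sorting the concatenated string, 'bc' and 'cb' collapse to the same interaction and duplicates are dropped. A quick check with illustrative namespaces:

```python
def get_interactions(list1, list2):
    # mirror of the helper above: sorted concatenation as the canonical form
    new_list = []
    for i in list1:
        for j in list2:
            new_interaction = "".join(sorted(i + j))
            if new_interaction not in new_list:
                new_list.append(new_interaction)
    return new_list

print(get_interactions(["a", "b"], ["b", "c"]))  # ['ab', 'ac', 'bb', 'bc']
```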
def strip_self_inter(s):
- """Remove duplicates in an interaction string
- """
+ """Remove duplicates in an interaction string"""
if len(s) == len(set(s)):
return s
else:
# return ''.join(sorted(set(s)))
- new_s = ''
+ new_s = ""
char_list = []
for i in s:
if i not in char_list:
@@ -351,10 +426,15 @@ class ChampionFrontierSearcher(BaseSearcher):
all_interactions_no_self_inter = []
for s in all_interactions:
s_no_inter = strip_self_inter(s)
- if len(s_no_inter) > 1 and s_no_inter not in all_interactions_no_self_inter:
+ if (
+ len(s_no_inter) > 1
+ and s_no_inter not in all_interactions_no_self_inter
+ ):
all_interactions_no_self_inter.append(s_no_inter)
all_interactions = all_interactions_no_self_inter
if highest_poly_order is not None:
- all_interactions = [c for c in all_interactions if len(c) <= highest_poly_order]
- logger.info('all_combinations %s', all_interactions)
+ all_interactions = [
+ c for c in all_interactions if len(c) <= highest_poly_order
+ ]
+ logger.info("all_combinations %s", all_interactions)
return all_interactions
diff --git a/flaml/searcher/search_thread.py b/flaml/searcher/search_thread.py
index f3118c753..e04dccdf9 100644
--- a/flaml/searcher/search_thread.py
+++ b/flaml/searcher/search_thread.py
@@ -54,7 +54,7 @@ class SearchThread:
@classmethod
def set_eps(cls, time_budget_s):
- cls._eps = max(min(time_budget_s / 1000.0, 1.0), 1e-10)
+ cls._eps = max(min(time_budget_s / 1000.0, 1.0), 1e-9)
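`_eps` clamps the per-update time increment into [1e-9, 1.0]; raising the floor from 1e-10 presumably gives more headroom against floating-point underflow with tiny budgets. The clamp behaves like this:

```python
def set_eps(time_budget_s):
    # clamp time_budget_s / 1000 into [1e-9, 1.0]
    return max(min(time_budget_s / 1000.0, 1.0), 1e-9)

print(set_eps(60))    # 0.06
print(set_eps(1e-7))  # 1e-9 (floor applied)
print(set_eps(1e6))   # 1.0  (cap applied)
```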
def suggest(self, trial_id: str) -> Optional[Dict]:
''' use the suggest() of the underlying search algorithm
diff --git a/flaml/tune/README.md b/flaml/tune/README.md
index aada353da..a0d5c6a83 100644
--- a/flaml/tune/README.md
+++ b/flaml/tune/README.md
@@ -1,6 +1,6 @@
# Economical Hyperparameter Optimization
-`flaml.tune` is a module for economical hyperparameter tuning. It frees users from manually tuning many hyperparameters for a software, such as machine learning training procedures.
+`flaml.tune` is a module for economical hyperparameter tuning. It frees users from manually tuning many hyperparameters for software such as machine learning training procedures.
It can be used standalone, or together with ray tune or nni.
* Example for sequential tuning (recommended when compute resource is limited and each trial can consume all the resources):
@@ -18,8 +18,8 @@ def evaluate_config(config):
# and the cost could be related to certain hyperparameters
# in this example, we assume it's proportional to x
time.sleep(config['x']/100000)
- # use tune.report to report the metric to optimize
- tune.report(metric=metric)
+ # use tune.report to report the metric to optimize
+ tune.report(metric=metric)
analysis = tune.run(
evaluate_config, # the function to evaluate a config
@@ -33,7 +33,7 @@ analysis = tune.run(
num_samples=-1, # the maximal number of configs to try, -1 means infinite
time_budget_s=60, # the time budget in seconds
local_dir='logs/', # the local directory to store logs
- # verbose=0, # verbosity
+ # verbose=0, # verbosity
# use_ray=True, # uncomment when performing parallel tuning using ray
)
@@ -57,8 +57,8 @@ def evaluate_config(config):
# and the cost could be related to certain hyperparameters
# in this example, we assume it's proportional to x
time.sleep(config['x']/100000)
- # use tune.report to report the metric to optimize
- tune.report(metric=metric)
+ # use tune.report to report the metric to optimize
+ tune.report(metric=metric)
# provide a time budget (in seconds) for the tuning process
time_budget_s = 60
@@ -77,25 +77,25 @@ cfo = CFO(low_cost_partial_config=low_cost_partial_config)
blendsearch = BlendSearch(
metric="metric", mode="min",
space=config_search_space,
- low_cost_partial_config=low_cost_partial_config)
+ low_cost_partial_config=low_cost_partial_config,
+ time_budget_s=time_budget_s
+)
# NOTE: when using BlendSearch as a search_alg in ray tune, you need to
-# configure the 'time_budget_s' for BlendSearch accordingly as follows such that
+# configure the 'time_budget_s' for BlendSearch accordingly such that
# BlendSearch is aware of the time budget. This step is not needed when
-# BlendSearch is used as the search_alg in flaml.tune as it is already done
-# automatically in flaml. Also, this step needs to be done after the search
-# space is passed to BlendSearch and before raytune.run.
-blendsearch.set_search_properties(config={"time_budget_s": time_budget_s})
+# BlendSearch is used as the search_alg in flaml.tune as it is done
+# automatically in flaml.
analysis = raytune.run(
evaluate_config, # the function to evaluate a config
config=config_search_space,
metric='metric', # the name of the metric used for optimization
mode='min', # the optimization mode, 'min' or 'max'
- num_samples=-1, # the maximal number of configs to try, -1 means infinite
+ num_samples=-1, # the maximal number of configs to try, -1 means infinite
time_budget_s=time_budget_s, # the time budget in seconds
local_dir='logs/', # the local directory to store logs
search_alg=blendsearch # or cfo
- )
+)
print(analysis.best_trial.last_result) # the best trial's result
print(analysis.best_config) # the best config
@@ -107,11 +107,10 @@ print(analysis.best_config) # the best config
$nnictl create --config ./config.yml
```
-* For more examples, please check out
+* For more examples, please check out
[notebooks](https://github.com/microsoft/FLAML/tree/main/notebook/).
-
-`flaml` offers two HPO methods: CFO and BlendSearch.
+`flaml` offers two HPO methods: CFO and BlendSearch.
`flaml.tune` uses BlendSearch by default.
## CFO: Frugal Optimization for Cost-related Hyperparameters
@@ -121,27 +120,27 @@ $nnictl create --config ./config.yml
-CFO uses the randomized direct search method FLOW2 with adaptive stepsize and random restart.
+CFO uses the randomized direct search method FLOW2 with adaptive stepsize and random restart.
It requires a low-cost initial point as input if such a point exists.
The search begins with the low-cost initial point and gradually moves to the
high-cost region if needed. The local search method has a provable convergence
-rate and bounded cost.
+rate and bounded cost.
-About FLOW2: FLOW2 is a simple yet effective randomized direct search method.
+About FLOW2: FLOW2 is a simple yet effective randomized direct search method.
It is an iterative optimization method that can optimize for black-box functions.
FLOW2 only requires pairwise comparisons between function values to perform iterative updates. Compared to existing HPO methods, FLOW2 has the following appealing properties:
+
1. It is applicable to general black-box functions with a good convergence rate in terms of loss.
-3. It provides theoretical guarantees on the total evaluation cost incurred.
+1. It provides theoretical guarantees on the total evaluation cost incurred.
The GIFs attached below demonstrate an example search trajectory of FLOW2 shown in the loss and evaluation cost (i.e., the training time) space, respectively. From the demonstration, we can see that (1) FLOW2 can quickly move toward the low-loss region, showing good convergence, and (2) FLOW2 tends to avoid exploring the high-cost region until necessary.
-
+ Figure 1. FLOW2 in tuning the # of leaves and the # of trees for XGBoost. The two background heatmaps show the loss and cost distribution of all configurations. The black dots are the points evaluated in FLOW2. Black dots connected by lines are points that yield better loss performance when evaluated.
-
Example:
```python
@@ -152,9 +151,9 @@ tune.run(...
```
Recommended scenario: there exist cost-related hyperparameters and a low-cost
-initial point is known before optimization.
+initial point is known before optimization.
If the search space is complex and CFO gets trapped in local optima, consider
-using BlendSearch.
+using BlendSearch.
## BlendSearch: Economical Hyperparameter Optimization With Blended Search Strategy
@@ -167,7 +166,7 @@ BlendSearch combines local search with global search. It leverages the frugality
of CFO and the space exploration ability of global search methods such as
Bayesian optimization. Like CFO, BlendSearch requires a low-cost initial point
as input if such a point exists, and starts the search from there. Different from
-CFO, BlendSearch will not wait for the local search to fully converge before
+CFO, BlendSearch will not wait for the local search to fully converge before
trying new start points. The new start points are suggested by the global search
method and filtered based on their distance to the existing points in the
cost-related dimensions. BlendSearch still gradually increases the trial cost.
@@ -184,19 +183,18 @@ tune.run(...
)
```
-- Recommended scenario: cost-related hyperparameters exist, a low-cost
+* Recommended scenario: cost-related hyperparameters exist, a low-cost
initial point is known, and the search space is complex enough that local search
is prone to getting stuck in local optima.
-
-- Suggestion about using larger search space in BlendSearch:
+* Suggestion about using larger search space in BlendSearch:
In hyperparameter optimization, a larger search space is desirable because it is more likely to include the optimal configuration (or one of the optimal configurations) in hindsight. However, the performance (especially anytime performance) of most existing HPO methods is undesirable if the cost of the configurations in the search space has a large variation. Thus hand-crafted small search spaces (with relatively homogeneous cost) are often used in practice for these methods, which is subject to idiosyncrasy. BlendSearch combines the benefits of local search and global search, which enables a smart (economical) way of deciding where to explore in the search space even though it is larger than necessary. This allows users to specify a larger search space in BlendSearch, which is often easier and a better practice than narrowing down the search space by hand.
For more technical details, please check our papers.
* [Frugal Optimization for Cost-related Hyperparameters](https://arxiv.org/abs/2005.01571). Qingyun Wu, Chi Wang, Silu Huang. AAAI 2021.
-```
+```bibtex
@inproceedings{wu2021cfo,
title={Frugal Optimization for Cost-related Hyperparameters},
author={Qingyun Wu and Chi Wang and Silu Huang},
@@ -207,11 +205,11 @@ For more technical details, please check our papers.
* [Economical Hyperparameter Optimization With Blended Search Strategy](https://www.microsoft.com/en-us/research/publication/economical-hyperparameter-optimization-with-blended-search-strategy/). Chi Wang, Qingyun Wu, Silu Huang, Amin Saied. ICLR 2021.
-```
+```bibtex
@inproceedings{wang2021blendsearch,
title={Economical Hyperparameter Optimization With Blended Search Strategy},
author={Chi Wang and Qingyun Wu and Silu Huang and Amin Saied},
year={2021},
booktitle={ICLR'21},
}
-```
\ No newline at end of file
+```
diff --git a/flaml/tune/trial_runner.py b/flaml/tune/trial_runner.py
index 690e57df3..75c5181ea 100644
--- a/flaml/tune/trial_runner.py
+++ b/flaml/tune/trial_runner.py
@@ -1,9 +1,10 @@
-'''!
- * Copyright (c) 2020-2021 Microsoft Corporation. All rights reserved.
+"""!
+ * Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the
* project root for license information.
-'''
+"""
from typing import Optional
+
# try:
# from ray import __version__ as ray_version
# assert ray_version >= '1.0.0'
@@ -11,20 +12,19 @@ from typing import Optional
# except (ImportError, AssertionError):
from .trial import Trial
import logging
+
logger = logging.getLogger(__name__)
-class Nologger():
- '''Logger without logging
- '''
+class Nologger:
+ """Logger without logging"""
def on_result(self, result):
pass
class SimpleTrial(Trial):
- '''A simple trial class
- '''
+ """A simple trial class"""
def __init__(self, config, trial_id=None):
self.trial_id = Trial.generate_id() if trial_id is None else trial_id
@@ -49,10 +49,13 @@ class BaseTrialRunner:
Note that the caller usually should not mutate trial state directly.
"""
- def __init__(self,
- search_alg=None, scheduler=None,
- metric: Optional[str] = None,
- mode: Optional[str] = 'min'):
+ def __init__(
+ self,
+ search_alg=None,
+ scheduler=None,
+ metric: Optional[str] = None,
+ mode: Optional[str] = "min",
+ ):
self._search_alg = search_alg
self._scheduler_alg = scheduler
self._trials = []
@@ -89,12 +92,12 @@ class BaseTrialRunner:
trial.set_status(Trial.PAUSED)
def stop_trial(self, trial):
- """Stops trial.
- """
+ """Stops trial."""
if trial.status not in [Trial.ERROR, Trial.TERMINATED]:
if self._scheduler_alg:
self._scheduler_alg.on_trial_complete(
- self, trial.trial_id, trial.last_result)
+ self, trial.trial_id, trial.last_result
+ )
self._search_alg.on_trial_complete(trial.trial_id, trial.last_result)
trial.set_status(Trial.TERMINATED)
elif self._scheduler_alg:
@@ -102,8 +105,7 @@ class BaseTrialRunner:
class SequentialTrialRunner(BaseTrialRunner):
- """Implementation of the sequential trial runner
- """
+ """Implementation of the sequential trial runner"""
def step(self) -> Trial:
"""Runs one step of the trial event loop.
@@ -114,7 +116,7 @@ class SequentialTrialRunner(BaseTrialRunner):
"""
trial_id = Trial.generate_id()
config = self._search_alg.suggest(trial_id)
- if config:
+ if config is not None:
trial = SimpleTrial(config, trial_id)
self.add_trial(trial)
trial.set_status(Trial.RUNNING)
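The guard changes because an empty dict is a valid suggestion (e.g., a space with no tunable keys) yet is falsy, so `if config:` would silently drop it. A one-line illustration:

```python
config = {}                # a valid, empty suggestion
assert not config          # falsy: the old `if config:` guard skips it
assert config is not None  # the new guard accepts it
```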
diff --git a/flaml/tune/tune.py b/flaml/tune/tune.py
index 1d2db41ec..22a7cbd11 100644
--- a/flaml/tune/tune.py
+++ b/flaml/tune/tune.py
@@ -13,7 +13,10 @@ try:
assert ray_version >= "1.0.0"
from ray.tune.analysis import ExperimentAnalysis as EA
+
+ ray_import = True
except (ImportError, AssertionError):
+ ray_import = False
from .analysis import ExperimentAnalysis as EA
from .result import DEFAULT_METRIC
import logging
@@ -278,9 +281,9 @@ def run(
else:
logger.setLevel(logging.CRITICAL)
- if search_alg is None:
- from ..searcher.blendsearch import BlendSearch
+ from ..searcher.blendsearch import BlendSearch
+ if search_alg is None:
search_alg = BlendSearch(
metric=metric or DEFAULT_METRIC,
mode=mode,
@@ -299,16 +302,27 @@ def run(
metric_constraints=metric_constraints,
)
else:
- search_alg.set_search_properties(metric, mode, config)
if metric is None or mode is None:
metric = metric or search_alg.metric
mode = mode or search_alg.mode
- if time_budget_s or num_samples > 0:
- search_alg.set_search_properties(
- None,
- None,
- config={"time_budget_s": time_budget_s, "num_samples": num_samples},
- )
+ if ray_import:
+ from ray.tune.suggest import ConcurrencyLimiter
+ else:
+ from flaml.searcher.suggestion import ConcurrencyLimiter
+ searcher = (
+ search_alg.searcher
+ if isinstance(search_alg, ConcurrencyLimiter)
+ else search_alg
+ )
+ if isinstance(searcher, BlendSearch):
+ setting = {}
+ if time_budget_s:
+ setting["time_budget_s"] = time_budget_s
+ if num_samples > 0:
+ setting["num_samples"] = num_samples
+ searcher.set_search_properties(metric, mode, config, setting)
+ else:
+ searcher.set_search_properties(metric, mode, config)
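After this change, a user-supplied BlendSearch gets the budget forwarded automatically by `flaml.tune.run`, so no separate `set_search_properties` call is needed. A hedged usage sketch (the objective and space are illustrative):

```python
from flaml import tune
from flaml.searcher.blendsearch import BlendSearch

def evaluate_config(config):
    # toy objective: distance from 85000
    tune.report(metric=abs(config["x"] - 85000))

analysis = tune.run(
    evaluate_config,
    config={"x": tune.randint(1, 100000)},
    metric="metric",
    mode="min",
    search_alg=BlendSearch(low_cost_partial_config={"x": 1}),
    time_budget_s=10,  # forwarded to BlendSearch via `setting` internally
    num_samples=-1,
)
```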
scheduler = None
if report_intermediate_result:
params = {}
@@ -321,15 +335,10 @@ def run(
params["grace_period"] = min_resource
if reduction_factor:
params["reduction_factor"] = reduction_factor
- try:
- from ray import __version__ as ray_version
-
- assert ray_version >= "1.0.0"
+ if ray_import:
from ray.tune.schedulers import ASHAScheduler
scheduler = ASHAScheduler(**params)
- except (ImportError, AssertionError):
- pass
if use_ray:
try:
from ray import tune
@@ -392,7 +401,9 @@ def run(
else:
fail += 1 # break with ub consecutive failures
if fail == ub:
- logger.warning("fail to sample a trial for 10 times in a row, stopping.")
+ logger.warning(
+ f"fail to sample a trial for {max_failure} times in a row, stopping."
+ )
if verbose > 0:
logger.handlers.clear()
return ExperimentAnalysis(_runner.get_trials(), metric=metric, mode=mode)
diff --git a/notebook/flaml_automl.ipynb b/notebook/flaml_automl.ipynb
index 99336f19c..71c4b33d0 100644
--- a/notebook/flaml_automl.ipynb
+++ b/notebook/flaml_automl.ipynb
@@ -3,7 +3,7 @@
{
"cell_type": "markdown",
"source": [
- "Copyright (c) 2020-2021 Microsoft Corporation. All rights reserved. \n",
+ "Copyright (c) Microsoft Corporation. All rights reserved. \n",
"\n",
"Licensed under the MIT License.\n",
"\n",
@@ -13,8 +13,7 @@
"## 1. Introduction\n",
"\n",
"FLAML is a Python library (https://github.com/microsoft/FLAML) designed to automatically produce accurate machine learning models \n",
- "with low computational cost. It is fast and cheap. The simple and lightweight design makes it easy \n",
- "to use and extend, such as adding new learners. FLAML can \n",
+ "with low computational cost. It is fast and cheap. The simple and lightweight design makes it easy to use and extend, such as adding new learners. FLAML can \n",
"- serve as an economical AutoML engine,\n",
"- be used as a fast hyperparameter tuning tool, or \n",
"- be embedded in self-tuning software that requires low latency & resource in repetitive\n",
@@ -37,7 +36,10 @@
"cell_type": "code",
"execution_count": null,
"source": [
- "!pip install flaml[notebook];"
+ "!pip install flaml[notebook];\n",
+ "# from v0.6.6, catboost is made an optional dependency to build conda package.\n",
+ "# to install catboost, you can uncomment and run:\n",
+ "# !pip install flaml[catboost]"
],
"outputs": [],
"metadata": {}
@@ -117,7 +119,7 @@
" \"time_budget\": 240, # total running time in seconds\n",
" \"metric\": 'accuracy', # can be: 'r2', 'rmse', 'mae', 'mse', 'accuracy', 'roc_auc', 'roc_auc_ovr',\n",
" # 'roc_auc_ovo', 'log_loss', 'mape', 'f1', 'ap', 'ndcg', 'micro_f1', 'macro_f1'\n",
- " \"task\": 'classification', # task type \n",
+ " \"task\": 'classification', # task type\n",
" \"log_file_name\": 'airlines_experiment.log', # flaml log file\n",
" \"seed\": 7654321, # random seed\n",
"}"
@@ -141,270 +143,166 @@
"output_type": "stream",
"name": "stderr",
"text": [
- "[flaml.automl: 08-31 00:53:33] {1279} INFO - Evaluation method: holdout\n",
- "[flaml.automl: 08-31 00:53:34] {1312} INFO - Minimizing error metric: 1-accuracy\n",
- "[flaml.automl: 08-31 00:53:34] {1338} INFO - List of ML learners in AutoML Run: ['lgbm', 'rf', 'catboost', 'xgboost', 'extra_tree', 'lrl1']\n",
- "[flaml.automl: 08-31 00:53:34] {1532} INFO - iteration 0, current learner lgbm\n",
- "[flaml.automl: 08-31 00:53:34] {1689} INFO - at 1.4s,\tbest lgbm's error=0.3777,\tbest lgbm's error=0.3777\n",
- "[flaml.automl: 08-31 00:53:34] {1532} INFO - iteration 1, current learner lgbm\n",
- "[flaml.automl: 08-31 00:53:34] {1689} INFO - at 1.6s,\tbest lgbm's error=0.3777,\tbest lgbm's error=0.3777\n",
- "[flaml.automl: 08-31 00:53:34] {1532} INFO - iteration 2, current learner lgbm\n",
- "[flaml.automl: 08-31 00:53:34] {1689} INFO - at 1.7s,\tbest lgbm's error=0.3777,\tbest lgbm's error=0.3777\n",
- "[flaml.automl: 08-31 00:53:34] {1532} INFO - iteration 3, current learner lgbm\n",
- "[flaml.automl: 08-31 00:53:34] {1689} INFO - at 1.9s,\tbest lgbm's error=0.3661,\tbest lgbm's error=0.3661\n",
- "[flaml.automl: 08-31 00:53:34] {1532} INFO - iteration 4, current learner xgboost\n",
- "[flaml.automl: 08-31 00:53:35] {1689} INFO - at 2.1s,\tbest xgboost's error=0.3787,\tbest lgbm's error=0.3661\n",
- "[flaml.automl: 08-31 00:53:35] {1532} INFO - iteration 5, current learner xgboost\n",
- "[flaml.automl: 08-31 00:53:35] {1689} INFO - at 2.2s,\tbest xgboost's error=0.3769,\tbest lgbm's error=0.3661\n",
- "[flaml.automl: 08-31 00:53:35] {1532} INFO - iteration 6, current learner extra_tree\n",
- "[flaml.automl: 08-31 00:53:35] {1689} INFO - at 2.4s,\tbest extra_tree's error=0.3788,\tbest lgbm's error=0.3661\n",
- "[flaml.automl: 08-31 00:53:35] {1532} INFO - iteration 7, current learner lgbm\n",
- "[flaml.automl: 08-31 00:53:35] {1689} INFO - at 2.5s,\tbest lgbm's error=0.3645,\tbest lgbm's error=0.3645\n",
- "[flaml.automl: 08-31 00:53:35] {1532} INFO - iteration 8, current learner lgbm\n",
- "[flaml.automl: 08-31 00:53:35] {1689} INFO - at 2.7s,\tbest lgbm's error=0.3645,\tbest lgbm's error=0.3645\n",
- "[flaml.automl: 08-31 00:53:35] {1532} INFO - iteration 9, current learner lgbm\n",
- "[flaml.automl: 08-31 00:53:35] {1689} INFO - at 2.8s,\tbest lgbm's error=0.3645,\tbest lgbm's error=0.3645\n",
- "[flaml.automl: 08-31 00:53:35] {1532} INFO - iteration 10, current learner lgbm\n",
- "[flaml.automl: 08-31 00:53:35] {1689} INFO - at 3.0s,\tbest lgbm's error=0.3610,\tbest lgbm's error=0.3610\n",
- "[flaml.automl: 08-31 00:53:35] {1532} INFO - iteration 11, current learner extra_tree\n",
- "[flaml.automl: 08-31 00:53:36] {1689} INFO - at 3.2s,\tbest extra_tree's error=0.3763,\tbest lgbm's error=0.3610\n",
- "[flaml.automl: 08-31 00:53:36] {1532} INFO - iteration 12, current learner rf\n",
- "[flaml.automl: 08-31 00:53:36] {1689} INFO - at 3.4s,\tbest rf's error=0.3787,\tbest lgbm's error=0.3610\n",
- "[flaml.automl: 08-31 00:53:36] {1532} INFO - iteration 13, current learner rf\n",
- "[flaml.automl: 08-31 00:53:36] {1689} INFO - at 3.5s,\tbest rf's error=0.3689,\tbest lgbm's error=0.3610\n",
- "[flaml.automl: 08-31 00:53:36] {1532} INFO - iteration 14, current learner rf\n",
- "[flaml.automl: 08-31 00:53:36] {1689} INFO - at 3.7s,\tbest rf's error=0.3689,\tbest lgbm's error=0.3610\n",
- "[flaml.automl: 08-31 00:53:36] {1532} INFO - iteration 15, current learner rf\n",
- "[flaml.automl: 08-31 00:53:36] {1689} INFO - at 4.0s,\tbest rf's error=0.3689,\tbest lgbm's error=0.3610\n",
- "[flaml.automl: 08-31 00:53:36] {1532} INFO - iteration 16, current learner lgbm\n",
- "[flaml.automl: 08-31 00:53:37] {1689} INFO - at 4.2s,\tbest lgbm's error=0.3610,\tbest lgbm's error=0.3610\n",
- "[flaml.automl: 08-31 00:53:37] {1532} INFO - iteration 17, current learner xgboost\n",
- "[flaml.automl: 08-31 00:53:37] {1689} INFO - at 4.3s,\tbest xgboost's error=0.3765,\tbest lgbm's error=0.3610\n",
- "[flaml.automl: 08-31 00:53:37] {1532} INFO - iteration 18, current learner lgbm\n",
- "[flaml.automl: 08-31 00:53:37] {1689} INFO - at 4.9s,\tbest lgbm's error=0.3610,\tbest lgbm's error=0.3610\n",
- "[flaml.automl: 08-31 00:53:37] {1532} INFO - iteration 19, current learner rf\n",
- "[flaml.automl: 08-31 00:53:38] {1689} INFO - at 5.1s,\tbest rf's error=0.3689,\tbest lgbm's error=0.3610\n",
- "[flaml.automl: 08-31 00:53:38] {1532} INFO - iteration 20, current learner lgbm\n",
- "[flaml.automl: 08-31 00:53:38] {1689} INFO - at 5.3s,\tbest lgbm's error=0.3610,\tbest lgbm's error=0.3610\n",
- "[flaml.automl: 08-31 00:53:38] {1532} INFO - iteration 21, current learner rf\n",
- "[flaml.automl: 08-31 00:53:38] {1689} INFO - at 5.6s,\tbest rf's error=0.3689,\tbest lgbm's error=0.3610\n",
- "[flaml.automl: 08-31 00:53:38] {1532} INFO - iteration 22, current learner lgbm\n",
- "[flaml.automl: 08-31 00:53:38] {1689} INFO - at 6.0s,\tbest lgbm's error=0.3604,\tbest lgbm's error=0.3604\n",
- "[flaml.automl: 08-31 00:53:38] {1532} INFO - iteration 23, current learner lgbm\n",
- "[flaml.automl: 08-31 00:53:39] {1689} INFO - at 6.5s,\tbest lgbm's error=0.3545,\tbest lgbm's error=0.3545\n",
- "[flaml.automl: 08-31 00:53:39] {1532} INFO - iteration 24, current learner rf\n",
- "[flaml.automl: 08-31 00:53:39] {1689} INFO - at 6.8s,\tbest rf's error=0.3631,\tbest lgbm's error=0.3545\n",
- "[flaml.automl: 08-31 00:53:39] {1532} INFO - iteration 25, current learner extra_tree\n",
- "[flaml.automl: 08-31 00:53:39] {1689} INFO - at 7.0s,\tbest extra_tree's error=0.3763,\tbest lgbm's error=0.3545\n",
- "[flaml.automl: 08-31 00:53:39] {1532} INFO - iteration 26, current learner lgbm\n",
- "[flaml.automl: 08-31 00:53:40] {1689} INFO - at 7.5s,\tbest lgbm's error=0.3523,\tbest lgbm's error=0.3523\n",
- "[flaml.automl: 08-31 00:53:40] {1532} INFO - iteration 27, current learner lgbm\n",
- "[flaml.automl: 08-31 00:53:40] {1689} INFO - at 8.0s,\tbest lgbm's error=0.3523,\tbest lgbm's error=0.3523\n",
- "[flaml.automl: 08-31 00:53:40] {1532} INFO - iteration 28, current learner rf\n",
- "[flaml.automl: 08-31 00:53:41] {1689} INFO - at 8.5s,\tbest rf's error=0.3621,\tbest lgbm's error=0.3523\n",
- "[flaml.automl: 08-31 00:53:41] {1532} INFO - iteration 29, current learner rf\n",
- "[flaml.automl: 08-31 00:53:41] {1689} INFO - at 8.7s,\tbest rf's error=0.3621,\tbest lgbm's error=0.3523\n",
- "[flaml.automl: 08-31 00:53:41] {1532} INFO - iteration 30, current learner lgbm\n",
- "[flaml.automl: 08-31 00:53:42] {1689} INFO - at 9.2s,\tbest lgbm's error=0.3523,\tbest lgbm's error=0.3523\n",
- "[flaml.automl: 08-31 00:53:42] {1532} INFO - iteration 31, current learner lgbm\n",
- "[flaml.automl: 08-31 00:53:42] {1689} INFO - at 9.7s,\tbest lgbm's error=0.3523,\tbest lgbm's error=0.3523\n",
- "[flaml.automl: 08-31 00:53:42] {1532} INFO - iteration 32, current learner lgbm\n",
- "[flaml.automl: 08-31 00:53:43] {1689} INFO - at 10.2s,\tbest lgbm's error=0.3523,\tbest lgbm's error=0.3523\n",
- "[flaml.automl: 08-31 00:53:43] {1532} INFO - iteration 33, current learner extra_tree\n",
- "[flaml.automl: 08-31 00:53:43] {1689} INFO - at 10.4s,\tbest extra_tree's error=0.3763,\tbest lgbm's error=0.3523\n",
- "[flaml.automl: 08-31 00:53:43] {1532} INFO - iteration 34, current learner lgbm\n",
- "[flaml.automl: 08-31 00:53:46] {1689} INFO - at 13.3s,\tbest lgbm's error=0.3475,\tbest lgbm's error=0.3475\n",
- "[flaml.automl: 08-31 00:53:46] {1532} INFO - iteration 35, current learner catboost\n",
- "[flaml.automl: 08-31 00:53:46] {1689} INFO - at 13.9s,\tbest catboost's error=0.3602,\tbest lgbm's error=0.3475\n",
- "[flaml.automl: 08-31 00:53:46] {1532} INFO - iteration 36, current learner catboost\n",
- "[flaml.automl: 08-31 00:53:47] {1689} INFO - at 14.4s,\tbest catboost's error=0.3602,\tbest lgbm's error=0.3475\n",
- "[flaml.automl: 08-31 00:53:47] {1532} INFO - iteration 37, current learner xgboost\n",
- "[flaml.automl: 08-31 00:53:47] {1689} INFO - at 14.5s,\tbest xgboost's error=0.3746,\tbest lgbm's error=0.3475\n",
- "[flaml.automl: 08-31 00:53:47] {1532} INFO - iteration 38, current learner catboost\n",
- "[flaml.automl: 08-31 00:53:47] {1689} INFO - at 14.8s,\tbest catboost's error=0.3602,\tbest lgbm's error=0.3475\n",
- "[flaml.automl: 08-31 00:53:47] {1532} INFO - iteration 39, current learner catboost\n",
- "[flaml.automl: 08-31 00:53:48] {1689} INFO - at 15.4s,\tbest catboost's error=0.3602,\tbest lgbm's error=0.3475\n",
- "[flaml.automl: 08-31 00:53:48] {1532} INFO - iteration 40, current learner catboost\n",
- "[flaml.automl: 08-31 00:53:48] {1689} INFO - at 15.6s,\tbest catboost's error=0.3602,\tbest lgbm's error=0.3475\n",
- "[flaml.automl: 08-31 00:53:48] {1532} INFO - iteration 41, current learner catboost\n",
- "[flaml.automl: 08-31 00:53:50] {1689} INFO - at 17.4s,\tbest catboost's error=0.3493,\tbest lgbm's error=0.3475\n",
- "[flaml.automl: 08-31 00:53:50] {1532} INFO - iteration 42, current learner xgboost\n",
- "[flaml.automl: 08-31 00:53:50] {1689} INFO - at 17.6s,\tbest xgboost's error=0.3673,\tbest lgbm's error=0.3475\n",
- "[flaml.automl: 08-31 00:53:50] {1532} INFO - iteration 43, current learner lgbm\n",
- "[flaml.automl: 08-31 00:53:52] {1689} INFO - at 19.8s,\tbest lgbm's error=0.3475,\tbest lgbm's error=0.3475\n",
- "[flaml.automl: 08-31 00:53:52] {1532} INFO - iteration 44, current learner xgboost\n",
- "[flaml.automl: 08-31 00:53:52] {1689} INFO - at 19.9s,\tbest xgboost's error=0.3673,\tbest lgbm's error=0.3475\n",
- "[flaml.automl: 08-31 00:53:52] {1532} INFO - iteration 45, current learner extra_tree\n",
- "[flaml.automl: 08-31 00:53:53] {1689} INFO - at 20.1s,\tbest extra_tree's error=0.3755,\tbest lgbm's error=0.3475\n",
- "[flaml.automl: 08-31 00:53:53] {1532} INFO - iteration 46, current learner xgboost\n",
- "[flaml.automl: 08-31 00:53:53] {1689} INFO - at 20.3s,\tbest xgboost's error=0.3617,\tbest lgbm's error=0.3475\n",
- "[flaml.automl: 08-31 00:53:53] {1532} INFO - iteration 47, current learner catboost\n",
- "[flaml.automl: 08-31 00:53:55] {1689} INFO - at 22.6s,\tbest catboost's error=0.3469,\tbest catboost's error=0.3469\n",
- "[flaml.automl: 08-31 00:53:55] {1532} INFO - iteration 48, current learner xgboost\n",
- "[flaml.automl: 08-31 00:53:55] {1689} INFO - at 22.8s,\tbest xgboost's error=0.3617,\tbest catboost's error=0.3469\n",
- "[flaml.automl: 08-31 00:53:55] {1532} INFO - iteration 49, current learner lgbm\n",
- "[flaml.automl: 08-31 00:54:00] {1689} INFO - at 27.4s,\tbest lgbm's error=0.3455,\tbest lgbm's error=0.3455\n",
- "[flaml.automl: 08-31 00:54:00] {1532} INFO - iteration 50, current learner catboost\n",
- "[flaml.automl: 08-31 00:54:01] {1689} INFO - at 29.0s,\tbest catboost's error=0.3469,\tbest lgbm's error=0.3455\n",
- "[flaml.automl: 08-31 00:54:01] {1532} INFO - iteration 51, current learner xgboost\n",
- "[flaml.automl: 08-31 00:54:02] {1689} INFO - at 29.2s,\tbest xgboost's error=0.3617,\tbest lgbm's error=0.3455\n",
- "[flaml.automl: 08-31 00:54:02] {1532} INFO - iteration 52, current learner catboost\n",
- "[flaml.automl: 08-31 00:54:03] {1689} INFO - at 30.3s,\tbest catboost's error=0.3469,\tbest lgbm's error=0.3455\n",
- "[flaml.automl: 08-31 00:54:03] {1532} INFO - iteration 53, current learner rf\n",
- "[flaml.automl: 08-31 00:54:03] {1689} INFO - at 30.7s,\tbest rf's error=0.3621,\tbest lgbm's error=0.3455\n",
- "[flaml.automl: 08-31 00:54:03] {1532} INFO - iteration 54, current learner lgbm\n",
- "[flaml.automl: 08-31 00:54:10] {1689} INFO - at 37.2s,\tbest lgbm's error=0.3444,\tbest lgbm's error=0.3444\n",
- "[flaml.automl: 08-31 00:54:10] {1532} INFO - iteration 55, current learner lgbm\n",
- "[flaml.automl: 08-31 00:54:14] {1689} INFO - at 41.7s,\tbest lgbm's error=0.3444,\tbest lgbm's error=0.3444\n",
- "[flaml.automl: 08-31 00:54:14] {1532} INFO - iteration 56, current learner xgboost\n",
- "[flaml.automl: 08-31 00:54:14] {1689} INFO - at 41.9s,\tbest xgboost's error=0.3591,\tbest lgbm's error=0.3444\n",
- "[flaml.automl: 08-31 00:54:14] {1532} INFO - iteration 57, current learner lgbm\n",
- "[flaml.automl: 08-31 00:54:22] {1689} INFO - at 49.3s,\tbest lgbm's error=0.3444,\tbest lgbm's error=0.3444\n",
- "[flaml.automl: 08-31 00:54:22] {1532} INFO - iteration 58, current learner rf\n",
- "[flaml.automl: 08-31 00:54:22] {1689} INFO - at 49.8s,\tbest rf's error=0.3567,\tbest lgbm's error=0.3444\n",
- "[flaml.automl: 08-31 00:54:22] {1532} INFO - iteration 59, current learner rf\n",
- "[flaml.automl: 08-31 00:54:23] {1689} INFO - at 50.2s,\tbest rf's error=0.3567,\tbest lgbm's error=0.3444\n",
- "[flaml.automl: 08-31 00:54:23] {1532} INFO - iteration 60, current learner lgbm\n",
- "[flaml.automl: 08-31 00:54:28] {1689} INFO - at 55.4s,\tbest lgbm's error=0.3356,\tbest lgbm's error=0.3356\n",
- "[flaml.automl: 08-31 00:54:28] {1532} INFO - iteration 61, current learner catboost\n",
- "[flaml.automl: 08-31 00:54:32] {1689} INFO - at 59.4s,\tbest catboost's error=0.3469,\tbest lgbm's error=0.3356\n",
- "[flaml.automl: 08-31 00:54:32] {1532} INFO - iteration 62, current learner lgbm\n",
- "[flaml.automl: 08-31 00:54:37] {1689} INFO - at 64.7s,\tbest lgbm's error=0.3300,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:54:37] {1532} INFO - iteration 63, current learner rf\n",
- "[flaml.automl: 08-31 00:54:38] {1689} INFO - at 65.4s,\tbest rf's error=0.3565,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:54:38] {1532} INFO - iteration 64, current learner xgboost\n",
- "[flaml.automl: 08-31 00:54:38] {1689} INFO - at 65.6s,\tbest xgboost's error=0.3591,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:54:38] {1532} INFO - iteration 65, current learner lgbm\n",
- "[flaml.automl: 08-31 00:54:43] {1689} INFO - at 70.4s,\tbest lgbm's error=0.3300,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:54:43] {1532} INFO - iteration 66, current learner lgbm\n",
- "[flaml.automl: 08-31 00:54:54] {1689} INFO - at 81.3s,\tbest lgbm's error=0.3300,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:54:54] {1532} INFO - iteration 67, current learner xgboost\n",
- "[flaml.automl: 08-31 00:54:54] {1689} INFO - at 81.5s,\tbest xgboost's error=0.3591,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:54:54] {1532} INFO - iteration 68, current learner lgbm\n",
- "[flaml.automl: 08-31 00:54:57] {1689} INFO - at 84.9s,\tbest lgbm's error=0.3300,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:54:57] {1532} INFO - iteration 69, current learner lgbm\n",
- "[flaml.automl: 08-31 00:55:01] {1689} INFO - at 88.3s,\tbest lgbm's error=0.3300,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:55:01] {1532} INFO - iteration 70, current learner xgboost\n",
- "[flaml.automl: 08-31 00:55:01] {1689} INFO - at 88.5s,\tbest xgboost's error=0.3591,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:55:01] {1532} INFO - iteration 71, current learner xgboost\n",
- "[flaml.automl: 08-31 00:55:01] {1689} INFO - at 88.6s,\tbest xgboost's error=0.3591,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:55:01] {1532} INFO - iteration 72, current learner catboost\n",
- "[flaml.automl: 08-31 00:55:04] {1689} INFO - at 91.8s,\tbest catboost's error=0.3469,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:55:04] {1532} INFO - iteration 73, current learner extra_tree\n",
- "[flaml.automl: 08-31 00:55:05] {1689} INFO - at 92.1s,\tbest extra_tree's error=0.3755,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:55:05] {1532} INFO - iteration 74, current learner xgboost\n",
- "[flaml.automl: 08-31 00:55:05] {1689} INFO - at 92.3s,\tbest xgboost's error=0.3591,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:55:05] {1532} INFO - iteration 75, current learner extra_tree\n",
- "[flaml.automl: 08-31 00:55:05] {1689} INFO - at 92.5s,\tbest extra_tree's error=0.3644,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:55:05] {1532} INFO - iteration 76, current learner xgboost\n",
- "[flaml.automl: 08-31 00:55:05] {1689} INFO - at 92.9s,\tbest xgboost's error=0.3574,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:55:05] {1532} INFO - iteration 77, current learner xgboost\n",
- "[flaml.automl: 08-31 00:55:06] {1689} INFO - at 93.3s,\tbest xgboost's error=0.3574,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:55:06] {1532} INFO - iteration 78, current learner extra_tree\n",
- "[flaml.automl: 08-31 00:55:06] {1689} INFO - at 93.5s,\tbest extra_tree's error=0.3644,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:55:06] {1532} INFO - iteration 79, current learner extra_tree\n",
- "[flaml.automl: 08-31 00:55:06] {1689} INFO - at 93.7s,\tbest extra_tree's error=0.3644,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:55:06] {1532} INFO - iteration 80, current learner rf\n",
- "[flaml.automl: 08-31 00:55:07] {1689} INFO - at 94.3s,\tbest rf's error=0.3565,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:55:07] {1532} INFO - iteration 81, current learner extra_tree\n",
- "[flaml.automl: 08-31 00:55:07] {1689} INFO - at 94.5s,\tbest extra_tree's error=0.3644,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:55:07] {1532} INFO - iteration 82, current learner extra_tree\n",
- "[flaml.automl: 08-31 00:55:07] {1689} INFO - at 94.7s,\tbest extra_tree's error=0.3644,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:55:07] {1532} INFO - iteration 83, current learner extra_tree\n",
- "[flaml.automl: 08-31 00:55:07] {1689} INFO - at 94.9s,\tbest extra_tree's error=0.3644,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:55:07] {1532} INFO - iteration 84, current learner extra_tree\n",
- "[flaml.automl: 08-31 00:55:08] {1689} INFO - at 95.2s,\tbest extra_tree's error=0.3644,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:55:08] {1532} INFO - iteration 85, current learner extra_tree\n",
- "[flaml.automl: 08-31 00:55:08] {1689} INFO - at 95.5s,\tbest extra_tree's error=0.3640,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:55:08] {1532} INFO - iteration 86, current learner rf\n",
- "[flaml.automl: 08-31 00:55:09] {1689} INFO - at 96.5s,\tbest rf's error=0.3565,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:55:09] {1532} INFO - iteration 87, current learner xgboost\n",
- "[flaml.automl: 08-31 00:55:09] {1689} INFO - at 96.9s,\tbest xgboost's error=0.3534,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:55:09] {1532} INFO - iteration 88, current learner xgboost\n",
- "[flaml.automl: 08-31 00:55:10] {1689} INFO - at 97.2s,\tbest xgboost's error=0.3534,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:55:10] {1532} INFO - iteration 89, current learner lgbm\n",
- "[flaml.automl: 08-31 00:55:19] {1689} INFO - at 106.4s,\tbest lgbm's error=0.3300,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:55:19] {1532} INFO - iteration 90, current learner xgboost\n",
- "[flaml.automl: 08-31 00:55:20] {1689} INFO - at 107.3s,\tbest xgboost's error=0.3504,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:55:20] {1532} INFO - iteration 91, current learner xgboost\n",
- "[flaml.automl: 08-31 00:55:20] {1689} INFO - at 107.8s,\tbest xgboost's error=0.3504,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:55:20] {1532} INFO - iteration 92, current learner extra_tree\n",
- "[flaml.automl: 08-31 00:55:20] {1689} INFO - at 108.0s,\tbest extra_tree's error=0.3624,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:55:20] {1532} INFO - iteration 93, current learner extra_tree\n",
- "[flaml.automl: 08-31 00:55:21] {1689} INFO - at 108.3s,\tbest extra_tree's error=0.3624,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:55:21] {1532} INFO - iteration 94, current learner xgboost\n",
- "[flaml.automl: 08-31 00:55:24] {1689} INFO - at 111.2s,\tbest xgboost's error=0.3504,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:55:24] {1532} INFO - iteration 95, current learner lgbm\n",
- "[flaml.automl: 08-31 00:55:27] {1689} INFO - at 114.6s,\tbest lgbm's error=0.3300,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:55:27] {1532} INFO - iteration 96, current learner extra_tree\n",
- "[flaml.automl: 08-31 00:55:27] {1689} INFO - at 114.8s,\tbest extra_tree's error=0.3598,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:55:27] {1532} INFO - iteration 97, current learner extra_tree\n",
- "[flaml.automl: 08-31 00:55:28] {1689} INFO - at 115.2s,\tbest extra_tree's error=0.3598,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:55:28] {1532} INFO - iteration 98, current learner extra_tree\n",
- "[flaml.automl: 08-31 00:55:28] {1689} INFO - at 115.4s,\tbest extra_tree's error=0.3597,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:55:28] {1532} INFO - iteration 99, current learner lrl1\n",
- "No low-cost partial config given to the search algorithm. For cost-frugal search, consider providing low-cost values for cost-related hps via 'low_cost_partial_config'.\n",
- "/home/dmx/miniconda2/envs/blend/lib/python3.8/site-packages/sklearn/linear_model/_sag.py:328: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge\n",
+ "[flaml.automl: 10-08 15:12:49] {1458} INFO - Data split method: stratified\n",
+ "[flaml.automl: 10-08 15:12:49] {1462} INFO - Evaluation method: holdout\n",
+ "[flaml.automl: 10-08 15:12:49] {1510} INFO - Minimizing error metric: 1-accuracy\n",
+ "[flaml.automl: 10-08 15:12:49] {1547} INFO - List of ML learners in AutoML Run: ['lgbm', 'rf', 'xgboost', 'extra_tree', 'lrl1']\n",
+ "[flaml.automl: 10-08 15:12:49] {1777} INFO - iteration 0, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:12:50] {1894} INFO - Estimated sufficient time budget=318171s. Estimated necessary time budget=5298s.\n",
+ "[flaml.automl: 10-08 15:12:50] {1966} INFO - at 1.8s,\testimator lgbm's best error=0.3777,\tbest estimator lgbm's best error=0.3777\n",
+ "[flaml.automl: 10-08 15:12:50] {1777} INFO - iteration 1, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:12:51] {1966} INFO - at 2.4s,\testimator lgbm's best error=0.3759,\tbest estimator lgbm's best error=0.3759\n",
+ "[flaml.automl: 10-08 15:12:51] {1777} INFO - iteration 2, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:12:53] {1966} INFO - at 4.7s,\testimator lgbm's best error=0.3759,\tbest estimator lgbm's best error=0.3759\n",
+ "[flaml.automl: 10-08 15:12:53] {1777} INFO - iteration 3, current learner xgboost\n",
+ "[flaml.automl: 10-08 15:12:53] {1966} INFO - at 5.0s,\testimator xgboost's best error=0.3787,\tbest estimator lgbm's best error=0.3759\n",
+ "[flaml.automl: 10-08 15:12:53] {1777} INFO - iteration 4, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:12:54] {1966} INFO - at 5.1s,\testimator lgbm's best error=0.3644,\tbest estimator lgbm's best error=0.3644\n",
+ "[flaml.automl: 10-08 15:12:54] {1777} INFO - iteration 5, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:12:54] {1966} INFO - at 5.3s,\testimator lgbm's best error=0.3588,\tbest estimator lgbm's best error=0.3588\n",
+ "[flaml.automl: 10-08 15:12:54] {1777} INFO - iteration 6, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:12:54] {1966} INFO - at 5.4s,\testimator lgbm's best error=0.3588,\tbest estimator lgbm's best error=0.3588\n",
+ "[flaml.automl: 10-08 15:12:54] {1777} INFO - iteration 7, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:12:54] {1966} INFO - at 5.9s,\testimator lgbm's best error=0.3555,\tbest estimator lgbm's best error=0.3555\n",
+ "[flaml.automl: 10-08 15:12:54] {1777} INFO - iteration 8, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:12:55] {1966} INFO - at 6.2s,\testimator lgbm's best error=0.3555,\tbest estimator lgbm's best error=0.3555\n",
+ "[flaml.automl: 10-08 15:12:55] {1777} INFO - iteration 9, current learner xgboost\n",
+ "[flaml.automl: 10-08 15:12:55] {1966} INFO - at 6.3s,\testimator xgboost's best error=0.3649,\tbest estimator lgbm's best error=0.3555\n",
+ "[flaml.automl: 10-08 15:12:55] {1777} INFO - iteration 10, current learner xgboost\n",
+ "[flaml.automl: 10-08 15:12:55] {1966} INFO - at 6.4s,\testimator xgboost's best error=0.3649,\tbest estimator lgbm's best error=0.3555\n",
+ "[flaml.automl: 10-08 15:12:55] {1777} INFO - iteration 11, current learner xgboost\n",
+ "[flaml.automl: 10-08 15:12:55] {1966} INFO - at 6.6s,\testimator xgboost's best error=0.3649,\tbest estimator lgbm's best error=0.3555\n",
+ "[flaml.automl: 10-08 15:12:55] {1777} INFO - iteration 12, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:12:56] {1966} INFO - at 7.7s,\testimator lgbm's best error=0.3555,\tbest estimator lgbm's best error=0.3555\n",
+ "[flaml.automl: 10-08 15:12:56] {1777} INFO - iteration 13, current learner xgboost\n",
+ "[flaml.automl: 10-08 15:12:56] {1966} INFO - at 7.8s,\testimator xgboost's best error=0.3629,\tbest estimator lgbm's best error=0.3555\n",
+ "[flaml.automl: 10-08 15:12:56] {1777} INFO - iteration 14, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:12:58] {1966} INFO - at 9.2s,\testimator lgbm's best error=0.3555,\tbest estimator lgbm's best error=0.3555\n",
+ "[flaml.automl: 10-08 15:12:58] {1777} INFO - iteration 15, current learner extra_tree\n",
+ "[flaml.automl: 10-08 15:12:58] {1966} INFO - at 9.4s,\testimator extra_tree's best error=0.3773,\tbest estimator lgbm's best error=0.3555\n",
+ "[flaml.automl: 10-08 15:12:58] {1777} INFO - iteration 16, current learner extra_tree\n",
+ "[flaml.automl: 10-08 15:12:58] {1966} INFO - at 9.5s,\testimator extra_tree's best error=0.3757,\tbest estimator lgbm's best error=0.3555\n",
+ "[flaml.automl: 10-08 15:12:58] {1777} INFO - iteration 17, current learner rf\n",
+ "[flaml.automl: 10-08 15:12:58] {1966} INFO - at 9.7s,\testimator rf's best error=0.3765,\tbest estimator lgbm's best error=0.3555\n",
+ "[flaml.automl: 10-08 15:12:58] {1777} INFO - iteration 18, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:12:59] {1966} INFO - at 10.7s,\testimator lgbm's best error=0.3542,\tbest estimator lgbm's best error=0.3542\n",
+ "[flaml.automl: 10-08 15:12:59] {1777} INFO - iteration 19, current learner rf\n",
+ "[flaml.automl: 10-08 15:12:59] {1966} INFO - at 10.9s,\testimator rf's best error=0.3724,\tbest estimator lgbm's best error=0.3542\n",
+ "[flaml.automl: 10-08 15:12:59] {1777} INFO - iteration 20, current learner rf\n",
+ "[flaml.automl: 10-08 15:13:00] {1966} INFO - at 11.1s,\testimator rf's best error=0.3724,\tbest estimator lgbm's best error=0.3542\n",
+ "[flaml.automl: 10-08 15:13:00] {1777} INFO - iteration 21, current learner xgboost\n",
+ "[flaml.automl: 10-08 15:13:00] {1966} INFO - at 11.2s,\testimator xgboost's best error=0.3629,\tbest estimator lgbm's best error=0.3542\n",
+ "[flaml.automl: 10-08 15:13:00] {1777} INFO - iteration 22, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:13:02] {1966} INFO - at 13.2s,\testimator lgbm's best error=0.3507,\tbest estimator lgbm's best error=0.3507\n",
+ "[flaml.automl: 10-08 15:13:02] {1777} INFO - iteration 23, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:13:03] {1966} INFO - at 14.1s,\testimator lgbm's best error=0.3507,\tbest estimator lgbm's best error=0.3507\n",
+ "[flaml.automl: 10-08 15:13:03] {1777} INFO - iteration 24, current learner xgboost\n",
+ "[flaml.automl: 10-08 15:13:03] {1966} INFO - at 14.2s,\testimator xgboost's best error=0.3612,\tbest estimator lgbm's best error=0.3507\n",
+ "[flaml.automl: 10-08 15:13:03] {1777} INFO - iteration 25, current learner extra_tree\n",
+ "[flaml.automl: 10-08 15:13:03] {1966} INFO - at 14.4s,\testimator extra_tree's best error=0.3757,\tbest estimator lgbm's best error=0.3507\n",
+ "[flaml.automl: 10-08 15:13:03] {1777} INFO - iteration 26, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:13:07] {1966} INFO - at 19.0s,\testimator lgbm's best error=0.3507,\tbest estimator lgbm's best error=0.3507\n",
+ "[flaml.automl: 10-08 15:13:07] {1777} INFO - iteration 27, current learner xgboost\n",
+ "[flaml.automl: 10-08 15:13:08] {1966} INFO - at 19.1s,\testimator xgboost's best error=0.3612,\tbest estimator lgbm's best error=0.3507\n",
+ "[flaml.automl: 10-08 15:13:08] {1777} INFO - iteration 28, current learner extra_tree\n",
+ "[flaml.automl: 10-08 15:13:08] {1966} INFO - at 19.3s,\testimator extra_tree's best error=0.3757,\tbest estimator lgbm's best error=0.3507\n",
+ "[flaml.automl: 10-08 15:13:08] {1777} INFO - iteration 29, current learner xgboost\n",
+ "[flaml.automl: 10-08 15:13:08] {1966} INFO - at 19.5s,\testimator xgboost's best error=0.3612,\tbest estimator lgbm's best error=0.3507\n",
+ "[flaml.automl: 10-08 15:13:08] {1777} INFO - iteration 30, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:13:09] {1966} INFO - at 20.9s,\testimator lgbm's best error=0.3507,\tbest estimator lgbm's best error=0.3507\n",
+ "[flaml.automl: 10-08 15:13:09] {1777} INFO - iteration 31, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:13:11] {1966} INFO - at 22.5s,\testimator lgbm's best error=0.3507,\tbest estimator lgbm's best error=0.3507\n",
+ "[flaml.automl: 10-08 15:13:11] {1777} INFO - iteration 32, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:13:23] {1966} INFO - at 34.3s,\testimator lgbm's best error=0.3404,\tbest estimator lgbm's best error=0.3404\n",
+ "[flaml.automl: 10-08 15:13:23] {1777} INFO - iteration 33, current learner extra_tree\n",
+ "[flaml.automl: 10-08 15:13:23] {1966} INFO - at 34.5s,\testimator extra_tree's best error=0.3757,\tbest estimator lgbm's best error=0.3404\n",
+ "[flaml.automl: 10-08 15:13:23] {1777} INFO - iteration 34, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:13:44] {1966} INFO - at 55.1s,\testimator lgbm's best error=0.3343,\tbest estimator lgbm's best error=0.3343\n",
+ "[flaml.automl: 10-08 15:13:44] {1777} INFO - iteration 35, current learner rf\n",
+ "[flaml.automl: 10-08 15:13:44] {1966} INFO - at 55.2s,\testimator rf's best error=0.3724,\tbest estimator lgbm's best error=0.3343\n",
+ "[flaml.automl: 10-08 15:13:44] {1777} INFO - iteration 36, current learner extra_tree\n",
+ "[flaml.automl: 10-08 15:13:44] {1966} INFO - at 55.4s,\testimator extra_tree's best error=0.3757,\tbest estimator lgbm's best error=0.3343\n",
+ "[flaml.automl: 10-08 15:13:44] {1777} INFO - iteration 37, current learner rf\n",
+ "[flaml.automl: 10-08 15:13:44] {1966} INFO - at 55.5s,\testimator rf's best error=0.3724,\tbest estimator lgbm's best error=0.3343\n",
+ "[flaml.automl: 10-08 15:13:44] {1777} INFO - iteration 38, current learner xgboost\n",
+ "[flaml.automl: 10-08 15:13:44] {1966} INFO - at 55.7s,\testimator xgboost's best error=0.3612,\tbest estimator lgbm's best error=0.3343\n",
+ "[flaml.automl: 10-08 15:13:44] {1777} INFO - iteration 39, current learner rf\n",
+ "[flaml.automl: 10-08 15:13:44] {1966} INFO - at 56.0s,\testimator rf's best error=0.3719,\tbest estimator lgbm's best error=0.3343\n",
+ "[flaml.automl: 10-08 15:13:44] {1777} INFO - iteration 40, current learner xgboost\n",
+ "[flaml.automl: 10-08 15:13:45] {1966} INFO - at 56.3s,\testimator xgboost's best error=0.3600,\tbest estimator lgbm's best error=0.3343\n",
+ "[flaml.automl: 10-08 15:13:45] {1777} INFO - iteration 41, current learner extra_tree\n",
+ "[flaml.automl: 10-08 15:13:45] {1966} INFO - at 56.5s,\testimator extra_tree's best error=0.3757,\tbest estimator lgbm's best error=0.3343\n",
+ "[flaml.automl: 10-08 15:13:45] {1777} INFO - iteration 42, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:13:56] {1966} INFO - at 67.1s,\testimator lgbm's best error=0.3343,\tbest estimator lgbm's best error=0.3343\n",
+ "[flaml.automl: 10-08 15:13:56] {1777} INFO - iteration 43, current learner xgboost\n",
+ "[flaml.automl: 10-08 15:13:56] {1966} INFO - at 67.4s,\testimator xgboost's best error=0.3558,\tbest estimator lgbm's best error=0.3343\n",
+ "[flaml.automl: 10-08 15:13:56] {1777} INFO - iteration 44, current learner extra_tree\n",
+ "[flaml.automl: 10-08 15:13:56] {1966} INFO - at 67.5s,\testimator extra_tree's best error=0.3757,\tbest estimator lgbm's best error=0.3343\n",
+ "[flaml.automl: 10-08 15:13:56] {1777} INFO - iteration 45, current learner xgboost\n",
+ "[flaml.automl: 10-08 15:13:56] {1966} INFO - at 67.8s,\testimator xgboost's best error=0.3558,\tbest estimator lgbm's best error=0.3343\n",
+ "[flaml.automl: 10-08 15:13:56] {1777} INFO - iteration 46, current learner xgboost\n",
+ "[flaml.automl: 10-08 15:13:57] {1966} INFO - at 68.3s,\testimator xgboost's best error=0.3558,\tbest estimator lgbm's best error=0.3343\n",
+ "[flaml.automl: 10-08 15:13:57] {1777} INFO - iteration 47, current learner xgboost\n",
+ "[flaml.automl: 10-08 15:13:57] {1966} INFO - at 68.6s,\testimator xgboost's best error=0.3558,\tbest estimator lgbm's best error=0.3343\n",
+ "[flaml.automl: 10-08 15:13:57] {1777} INFO - iteration 48, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:14:08] {1966} INFO - at 79.8s,\testimator lgbm's best error=0.3296,\tbest estimator lgbm's best error=0.3296\n",
+ "[flaml.automl: 10-08 15:14:08] {1777} INFO - iteration 49, current learner xgboost\n",
+ "[flaml.automl: 10-08 15:14:11] {1966} INFO - at 82.2s,\testimator xgboost's best error=0.3544,\tbest estimator lgbm's best error=0.3296\n",
+ "[flaml.automl: 10-08 15:14:11] {1777} INFO - iteration 50, current learner extra_tree\n",
+ "[flaml.automl: 10-08 15:14:11] {1966} INFO - at 82.4s,\testimator extra_tree's best error=0.3753,\tbest estimator lgbm's best error=0.3296\n",
+ "[flaml.automl: 10-08 15:14:11] {1777} INFO - iteration 51, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:14:34] {1966} INFO - at 105.2s,\testimator lgbm's best error=0.3296,\tbest estimator lgbm's best error=0.3296\n",
+ "[flaml.automl: 10-08 15:14:34] {1777} INFO - iteration 52, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:14:43] {1966} INFO - at 114.1s,\testimator lgbm's best error=0.3296,\tbest estimator lgbm's best error=0.3296\n",
+ "[flaml.automl: 10-08 15:14:43] {1777} INFO - iteration 53, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:15:04] {1966} INFO - at 135.7s,\testimator lgbm's best error=0.3296,\tbest estimator lgbm's best error=0.3296\n",
+ "[flaml.automl: 10-08 15:15:04] {1777} INFO - iteration 54, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:15:09] {1966} INFO - at 140.0s,\testimator lgbm's best error=0.3296,\tbest estimator lgbm's best error=0.3296\n",
+ "[flaml.automl: 10-08 15:15:09] {1777} INFO - iteration 55, current learner xgboost\n",
+ "[flaml.automl: 10-08 15:15:12] {1966} INFO - at 143.5s,\testimator xgboost's best error=0.3494,\tbest estimator lgbm's best error=0.3296\n",
+ "[flaml.automl: 10-08 15:15:12] {1777} INFO - iteration 56, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:16:03] {1966} INFO - at 194.7s,\testimator lgbm's best error=0.3296,\tbest estimator lgbm's best error=0.3296\n",
+ "[flaml.automl: 10-08 15:16:03] {1777} INFO - iteration 57, current learner rf\n",
+ "[flaml.automl: 10-08 15:16:03] {1966} INFO - at 194.9s,\testimator rf's best error=0.3717,\tbest estimator lgbm's best error=0.3296\n",
+ "[flaml.automl: 10-08 15:16:03] {1777} INFO - iteration 58, current learner xgboost\n",
+ "[flaml.automl: 10-08 15:16:05] {1966} INFO - at 196.9s,\testimator xgboost's best error=0.3494,\tbest estimator lgbm's best error=0.3296\n",
+ "[flaml.automl: 10-08 15:16:05] {1777} INFO - iteration 59, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:16:45] {1966} INFO - at 236.4s,\testimator lgbm's best error=0.3296,\tbest estimator lgbm's best error=0.3296\n",
+ "[flaml.automl: 10-08 15:16:45] {1777} INFO - iteration 60, current learner xgboost\n",
+ "[flaml.automl: 10-08 15:16:47] {1966} INFO - at 238.8s,\testimator xgboost's best error=0.3494,\tbest estimator lgbm's best error=0.3296\n",
+ "[flaml.automl: 10-08 15:16:47] {1777} INFO - iteration 61, current learner rf\n",
+ "[flaml.automl: 10-08 15:16:47] {1966} INFO - at 238.8s,\testimator rf's best error=0.3717,\tbest estimator lgbm's best error=0.3296\n",
+ "[flaml.automl: 10-08 15:16:47] {1777} INFO - iteration 62, current learner rf\n",
+ "[flaml.automl: 10-08 15:16:47] {1966} INFO - at 238.9s,\testimator rf's best error=0.3717,\tbest estimator lgbm's best error=0.3296\n",
+ "[flaml.automl: 10-08 15:16:47] {1777} INFO - iteration 63, current learner lrl1\n",
+ "/home/dmx/miniconda2/envs/test/lib/python3.8/site-packages/sklearn/linear_model/_sag.py:328: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge\n",
" warnings.warn(\"The max_iter was reached which means \"\n",
- "[flaml.automl: 08-31 00:55:28] {1689} INFO - at 115.8s,\tbest lrl1's error=0.4338,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:55:28] {1532} INFO - iteration 100, current learner lrl1\n",
- "/home/dmx/miniconda2/envs/blend/lib/python3.8/site-packages/sklearn/linear_model/_sag.py:328: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge\n",
+ "[flaml.automl: 10-08 15:16:48] {1966} INFO - at 239.2s,\testimator lrl1's best error=0.4339,\tbest estimator lgbm's best error=0.3296\n",
+ "[flaml.automl: 10-08 15:16:48] {1777} INFO - iteration 64, current learner lrl1\n",
+ "/home/dmx/miniconda2/envs/test/lib/python3.8/site-packages/sklearn/linear_model/_sag.py:328: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge\n",
" warnings.warn(\"The max_iter was reached which means \"\n",
- "[flaml.automl: 08-31 00:55:29] {1689} INFO - at 116.1s,\tbest lrl1's error=0.4338,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:55:29] {1532} INFO - iteration 101, current learner lrl1\n",
- "/home/dmx/miniconda2/envs/blend/lib/python3.8/site-packages/sklearn/linear_model/_sag.py:328: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge\n",
- " warnings.warn(\"The max_iter was reached which means \"\n",
- "[flaml.automl: 08-31 00:55:29] {1689} INFO - at 116.4s,\tbest lrl1's error=0.4338,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:55:29] {1532} INFO - iteration 102, current learner lrl1\n",
- "/home/dmx/miniconda2/envs/blend/lib/python3.8/site-packages/sklearn/linear_model/_sag.py:328: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge\n",
- " warnings.warn(\"The max_iter was reached which means \"\n",
- "[flaml.automl: 08-31 00:55:30] {1689} INFO - at 117.3s,\tbest lrl1's error=0.4334,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:55:30] {1532} INFO - iteration 103, current learner catboost\n",
- "[flaml.automl: 08-31 00:55:42] {1689} INFO - at 130.0s,\tbest catboost's error=0.3385,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:55:42] {1532} INFO - iteration 104, current learner lgbm\n",
- "[flaml.automl: 08-31 00:55:52] {1689} INFO - at 139.2s,\tbest lgbm's error=0.3300,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:55:52] {1532} INFO - iteration 105, current learner catboost\n",
- "[flaml.automl: 08-31 00:56:44] {1689} INFO - at 191.8s,\tbest catboost's error=0.3385,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:56:44] {1532} INFO - iteration 106, current learner lgbm\n",
- "[flaml.automl: 08-31 00:56:49] {1689} INFO - at 196.9s,\tbest lgbm's error=0.3300,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:56:49] {1532} INFO - iteration 107, current learner catboost\n",
- "[flaml.automl: 08-31 00:56:56] {1689} INFO - at 203.1s,\tbest catboost's error=0.3378,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:56:56] {1532} INFO - iteration 108, current learner xgboost\n",
- "[flaml.automl: 08-31 00:56:56] {1689} INFO - at 203.8s,\tbest xgboost's error=0.3504,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:56:56] {1532} INFO - iteration 109, current learner lgbm\n",
- "[flaml.automl: 08-31 00:57:02] {1689} INFO - at 209.3s,\tbest lgbm's error=0.3300,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:57:02] {1532} INFO - iteration 110, current learner extra_tree\n",
- "[flaml.automl: 08-31 00:57:02] {1689} INFO - at 209.8s,\tbest extra_tree's error=0.3580,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:57:02] {1532} INFO - iteration 111, current learner extra_tree\n",
- "[flaml.automl: 08-31 00:57:03] {1689} INFO - at 210.2s,\tbest extra_tree's error=0.3555,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:57:03] {1532} INFO - iteration 112, current learner lgbm\n",
- "[flaml.automl: 08-31 00:57:21] {1689} INFO - at 228.3s,\tbest lgbm's error=0.3300,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:57:21] {1532} INFO - iteration 113, current learner extra_tree\n",
- "[flaml.automl: 08-31 00:57:21] {1689} INFO - at 228.6s,\tbest extra_tree's error=0.3555,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:57:21] {1532} INFO - iteration 114, current learner extra_tree\n",
- "[flaml.automl: 08-31 00:57:21] {1689} INFO - at 229.0s,\tbest extra_tree's error=0.3555,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:57:21] {1532} INFO - iteration 115, current learner lrl1\n",
- "/home/dmx/miniconda2/envs/blend/lib/python3.8/site-packages/sklearn/linear_model/_sag.py:328: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge\n",
- " warnings.warn(\"The max_iter was reached which means \"\n",
- "[flaml.automl: 08-31 00:57:22] {1689} INFO - at 229.9s,\tbest lrl1's error=0.4334,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:57:22] {1532} INFO - iteration 116, current learner lgbm\n",
- "[flaml.automl: 08-31 00:57:25] {1689} INFO - at 232.4s,\tbest lgbm's error=0.3300,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:57:25] {1532} INFO - iteration 117, current learner lgbm\n",
- "[flaml.automl: 08-31 00:57:34] {1689} INFO - at 241.7s,\tbest lgbm's error=0.3300,\tbest lgbm's error=0.3300\n",
- "[flaml.automl: 08-31 00:57:34] {1766} INFO - selected model: LGBMClassifier(colsample_bytree=0.3841266992710469,\n",
- " learning_rate=0.04886499949999022, max_bin=512,\n",
- " min_child_samples=4, n_estimators=220, num_leaves=270,\n",
- " objective='binary', reg_alpha=0.0009765625,\n",
- " reg_lambda=0.07539015928723636, verbose=-1)\n",
- "[flaml.automl: 08-31 00:57:43] {1814} INFO - retrain lgbm for 9.1s\n",
- "[flaml.automl: 08-31 00:57:43] {1817} INFO - retrained model: LGBMClassifier(colsample_bytree=0.3841266992710469,\n",
- " learning_rate=0.04886499949999022, max_bin=512,\n",
- " min_child_samples=4, n_estimators=220, num_leaves=270,\n",
- " objective='binary', reg_alpha=0.0009765625,\n",
- " reg_lambda=0.07539015928723636, verbose=-1)\n",
- "[flaml.automl: 08-31 00:57:43] {1364} INFO - fit succeeded\n",
- "[flaml.automl: 08-31 00:57:43] {1365} INFO - Time taken to find the best model: 241.71747207641602\n",
- "[flaml.automl: 08-31 00:57:43] {1370} WARNING - Time taken to find the best model is 101% of the provided time budget and not all estimators' hyperparameter search converged. Consider increasing the time budget.\n"
+ "[flaml.automl: 10-08 15:16:48] {1966} INFO - at 239.5s,\testimator lrl1's best error=0.4339,\tbest estimator lgbm's best error=0.3296\n",
+ "[flaml.automl: 10-08 15:16:48] {1777} INFO - iteration 65, current learner rf\n",
+ "[flaml.automl: 10-08 15:16:48] {1966} INFO - at 239.6s,\testimator rf's best error=0.3717,\tbest estimator lgbm's best error=0.3296\n",
+ "[flaml.automl: 10-08 15:16:48] {1777} INFO - iteration 66, current learner rf\n",
+ "[flaml.automl: 10-08 15:16:48] {1966} INFO - at 239.6s,\testimator rf's best error=0.3717,\tbest estimator lgbm's best error=0.3296\n",
+ "[flaml.automl: 10-08 15:16:48] {1777} INFO - iteration 67, current learner extra_tree\n",
+ "[flaml.automl: 10-08 15:16:48] {1966} INFO - at 239.7s,\testimator extra_tree's best error=0.3753,\tbest estimator lgbm's best error=0.3296\n",
+ "[flaml.automl: 10-08 15:16:48] {1777} INFO - iteration 68, current learner rf\n",
+ "[flaml.automl: 10-08 15:16:48] {1966} INFO - at 239.8s,\testimator rf's best error=0.3717,\tbest estimator lgbm's best error=0.3296\n",
+ "[flaml.automl: 10-08 15:16:48] {2073} INFO - selected model: LGBMClassifier(colsample_bytree=0.7263265270618353,\n",
+ " learning_rate=0.19240592731562936, max_bin=511,\n",
+ " min_child_samples=101, n_estimators=334, num_leaves=50,\n",
+ " reg_alpha=0.042474252908075376, reg_lambda=0.44574701224719,\n",
+ " verbose=-1)\n",
+ "[flaml.automl: 10-08 15:16:59] {2136} INFO - retrain lgbm for 10.5s\n",
+ "[flaml.automl: 10-08 15:16:59] {2142} INFO - retrained model: LGBMClassifier(colsample_bytree=0.7263265270618353,\n",
+ " learning_rate=0.19240592731562936, max_bin=511,\n",
+ " min_child_samples=101, n_estimators=334, num_leaves=50,\n",
+ " reg_alpha=0.042474252908075376, reg_lambda=0.44574701224719,\n",
+ " verbose=-1)\n",
+ "[flaml.automl: 10-08 15:16:59] {1571} INFO - fit succeeded\n",
+ "[flaml.automl: 10-08 15:16:59] {1572} INFO - Time taken to find the best model: 79.82886719703674\n"
]
}
],
@@ -444,9 +342,9 @@
"name": "stdout",
"text": [
"Best ML leaner: lgbm\n",
- "Best hyperparmeter config: {'n_estimators': 220, 'num_leaves': 270, 'min_child_samples': 4, 'learning_rate': 0.04886499949999022, 'log_max_bin': 10, 'colsample_bytree': 0.3841266992710469, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.07539015928723636, 'FLAML_sample_size': 364083}\n",
- "Best accuracy on validation data: 0.67\n",
- "Training duration of best run: 9.323 s\n"
+ "Best hyperparmeter config: {'n_estimators': 334, 'num_leaves': 50, 'min_child_samples': 101, 'learning_rate': 0.19240592731562936, 'log_max_bin': 9, 'colsample_bytree': 0.7263265270618353, 'reg_alpha': 0.042474252908075376, 'reg_lambda': 0.44574701224719, 'FLAML_sample_size': 364083}\n",
+ "Best accuracy on validation data: 0.6704\n",
+ "Training duration of best run: 11.24 s\n"
]
}
],
@@ -468,11 +366,11 @@
"output_type": "execute_result",
"data": {
"text/plain": [
- "LGBMClassifier(colsample_bytree=0.3841266992710469,\n",
- " learning_rate=0.04886499949999022, max_bin=512,\n",
- " min_child_samples=4, n_estimators=220, num_leaves=270,\n",
- " objective='binary', reg_alpha=0.0009765625,\n",
- " reg_lambda=0.07539015928723636, verbose=-1)"
+ "LGBMClassifier(colsample_bytree=0.7263265270618353,\n",
+ " learning_rate=0.19240592731562936, max_bin=511,\n",
+ " min_child_samples=101, n_estimators=334, num_leaves=50,\n",
+ " reg_alpha=0.042474252908075376, reg_lambda=0.44574701224719,\n",
+ " verbose=-1)"
]
},
"metadata": {},
@@ -555,9 +453,9 @@
"output_type": "stream",
"name": "stdout",
"text": [
- "accuracy = 0.6729231864497278\n",
- "roc_auc = 0.7261961112785199\n",
- "log_loss = 0.6033707263741326\n"
+ "accuracy = 0.6713287750470908\n",
+ "roc_auc = 0.7249878990284184\n",
+ "log_loss = 0.6035815508574605\n"
]
}
],
@@ -597,18 +495,15 @@
"name": "stdout",
"text": [
"{'Current Learner': 'lgbm', 'Current Sample': 10000, 'Current Hyper-parameters': {'n_estimators': 4, 'num_leaves': 4, 'min_child_samples': 20, 'learning_rate': 0.09999999999999995, 'log_max_bin': 8, 'colsample_bytree': 1.0, 'reg_alpha': 0.0009765625, 'reg_lambda': 1.0, 'FLAML_sample_size': 10000}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 4, 'num_leaves': 4, 'min_child_samples': 20, 'learning_rate': 0.09999999999999995, 'log_max_bin': 8, 'colsample_bytree': 1.0, 'reg_alpha': 0.0009765625, 'reg_lambda': 1.0, 'FLAML_sample_size': 10000}}\n",
- "{'Current Learner': 'lgbm', 'Current Sample': 10000, 'Current Hyper-parameters': {'n_estimators': 12, 'num_leaves': 4, 'min_child_samples': 15, 'learning_rate': 0.2712162364070373, 'log_max_bin': 10, 'colsample_bytree': 0.9285002286474459, 'reg_alpha': 0.002668211515123386, 'reg_lambda': 0.5215467339232843, 'FLAML_sample_size': 10000}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 12, 'num_leaves': 4, 'min_child_samples': 15, 'learning_rate': 0.2712162364070373, 'log_max_bin': 10, 'colsample_bytree': 0.9285002286474459, 'reg_alpha': 0.002668211515123386, 'reg_lambda': 0.5215467339232843, 'FLAML_sample_size': 10000}}\n",
- "{'Current Learner': 'lgbm', 'Current Sample': 10000, 'Current Hyper-parameters': {'n_estimators': 14, 'num_leaves': 5, 'min_child_samples': 9, 'learning_rate': 0.2835381908967212, 'log_max_bin': 9, 'colsample_bytree': 0.8304072431299575, 'reg_alpha': 0.0014132988481787994, 'reg_lambda': 0.033183495034912504, 'FLAML_sample_size': 10000}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 14, 'num_leaves': 5, 'min_child_samples': 9, 'learning_rate': 0.2835381908967212, 'log_max_bin': 9, 'colsample_bytree': 0.8304072431299575, 'reg_alpha': 0.0014132988481787994, 'reg_lambda': 0.033183495034912504, 'FLAML_sample_size': 10000}}\n",
- "{'Current Learner': 'lgbm', 'Current Sample': 10000, 'Current Hyper-parameters': {'n_estimators': 53, 'num_leaves': 4, 'min_child_samples': 7, 'learning_rate': 0.15662398373030859, 'log_max_bin': 10, 'colsample_bytree': 0.7610534336273627, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.0064258982194552745, 'FLAML_sample_size': 10000}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 53, 'num_leaves': 4, 'min_child_samples': 7, 'learning_rate': 0.15662398373030859, 'log_max_bin': 10, 'colsample_bytree': 0.7610534336273627, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.0064258982194552745, 'FLAML_sample_size': 10000}}\n",
- "{'Current Learner': 'lgbm', 'Current Sample': 40000, 'Current Hyper-parameters': {'n_estimators': 53, 'num_leaves': 4, 'min_child_samples': 7, 'learning_rate': 0.15662398373030859, 'log_max_bin': 10, 'colsample_bytree': 0.7610534336273627, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.0064258982194552745, 'FLAML_sample_size': 40000}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 53, 'num_leaves': 4, 'min_child_samples': 7, 'learning_rate': 0.15662398373030859, 'log_max_bin': 10, 'colsample_bytree': 0.7610534336273627, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.0064258982194552745, 'FLAML_sample_size': 40000}}\n",
- "{'Current Learner': 'lgbm', 'Current Sample': 40000, 'Current Hyper-parameters': {'n_estimators': 89, 'num_leaves': 4, 'min_child_samples': 6, 'learning_rate': 0.2915468353191124, 'log_max_bin': 10, 'colsample_bytree': 0.8291836310024803, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.022917008702549507, 'FLAML_sample_size': 40000}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 89, 'num_leaves': 4, 'min_child_samples': 6, 'learning_rate': 0.2915468353191124, 'log_max_bin': 10, 'colsample_bytree': 0.8291836310024803, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.022917008702549507, 'FLAML_sample_size': 40000}}\n",
- "{'Current Learner': 'lgbm', 'Current Sample': 40000, 'Current Hyper-parameters': {'n_estimators': 53, 'num_leaves': 19, 'min_child_samples': 7, 'learning_rate': 0.15662398373030845, 'log_max_bin': 9, 'colsample_bytree': 0.7610534336273627, 'reg_alpha': 0.006958608037974516, 'reg_lambda': 0.0064258982194552745, 'FLAML_sample_size': 40000}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 53, 'num_leaves': 19, 'min_child_samples': 7, 'learning_rate': 0.15662398373030845, 'log_max_bin': 9, 'colsample_bytree': 0.7610534336273627, 'reg_alpha': 0.006958608037974516, 'reg_lambda': 0.0064258982194552745, 'FLAML_sample_size': 40000}}\n",
- "{'Current Learner': 'lgbm', 'Current Sample': 364083, 'Current Hyper-parameters': {'n_estimators': 53, 'num_leaves': 19, 'min_child_samples': 7, 'learning_rate': 0.15662398373030845, 'log_max_bin': 9, 'colsample_bytree': 0.7610534336273627, 'reg_alpha': 0.006958608037974516, 'reg_lambda': 0.0064258982194552745, 'FLAML_sample_size': 364083}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 53, 'num_leaves': 19, 'min_child_samples': 7, 'learning_rate': 0.15662398373030845, 'log_max_bin': 9, 'colsample_bytree': 0.7610534336273627, 'reg_alpha': 0.006958608037974516, 'reg_lambda': 0.0064258982194552745, 'FLAML_sample_size': 364083}}\n",
- "{'Current Learner': 'catboost', 'Current Sample': 40000, 'Current Hyper-parameters': {'early_stopping_rounds': 11, 'learning_rate': 0.0943289179113066, 'FLAML_sample_size': 40000}, 'Best Learner': 'catboost', 'Best Hyper-parameters': {'early_stopping_rounds': 11, 'learning_rate': 0.0943289179113066, 'FLAML_sample_size': 40000}}\n",
- "{'Current Learner': 'lgbm', 'Current Sample': 364083, 'Current Hyper-parameters': {'n_estimators': 112, 'num_leaves': 42, 'min_child_samples': 6, 'learning_rate': 0.05081630216512539, 'log_max_bin': 8, 'colsample_bytree': 0.6140029119098487, 'reg_alpha': 0.0043659867548350275, 'reg_lambda': 0.00988067243709054, 'FLAML_sample_size': 364083}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 112, 'num_leaves': 42, 'min_child_samples': 6, 'learning_rate': 0.05081630216512539, 'log_max_bin': 8, 'colsample_bytree': 0.6140029119098487, 'reg_alpha': 0.0043659867548350275, 'reg_lambda': 0.00988067243709054, 'FLAML_sample_size': 364083}}\n",
- "{'Current Learner': 'lgbm', 'Current Sample': 364083, 'Current Hyper-parameters': {'n_estimators': 224, 'num_leaves': 19, 'min_child_samples': 3, 'learning_rate': 0.06277721620788371, 'log_max_bin': 9, 'colsample_bytree': 0.6641909243388362, 'reg_alpha': 0.02046640007359354, 'reg_lambda': 0.021186644668220945, 'FLAML_sample_size': 364083}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 224, 'num_leaves': 19, 'min_child_samples': 3, 'learning_rate': 0.06277721620788371, 'log_max_bin': 9, 'colsample_bytree': 0.6641909243388362, 'reg_alpha': 0.02046640007359354, 'reg_lambda': 0.021186644668220945, 'FLAML_sample_size': 364083}}\n",
- "{'Current Learner': 'lgbm', 'Current Sample': 364083, 'Current Hyper-parameters': {'n_estimators': 159, 'num_leaves': 54, 'min_child_samples': 3, 'learning_rate': 0.15480526013767984, 'log_max_bin': 9, 'colsample_bytree': 0.4565737938156385, 'reg_alpha': 0.014844095616079196, 'reg_lambda': 0.01191495998733094, 'FLAML_sample_size': 364083}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 159, 'num_leaves': 54, 'min_child_samples': 3, 'learning_rate': 0.15480526013767984, 'log_max_bin': 9, 'colsample_bytree': 0.4565737938156385, 'reg_alpha': 0.014844095616079196, 'reg_lambda': 0.01191495998733094, 'FLAML_sample_size': 364083}}\n",
- "{'Current Learner': 'lgbm', 'Current Sample': 364083, 'Current Hyper-parameters': {'n_estimators': 100, 'num_leaves': 310, 'min_child_samples': 2, 'learning_rate': 0.0958942820044505, 'log_max_bin': 10, 'colsample_bytree': 0.40118043723920377, 'reg_alpha': 0.006532533034382694, 'reg_lambda': 0.02014962736208268, 'FLAML_sample_size': 364083}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 100, 'num_leaves': 310, 'min_child_samples': 2, 'learning_rate': 0.0958942820044505, 'log_max_bin': 10, 'colsample_bytree': 0.40118043723920377, 'reg_alpha': 0.006532533034382694, 'reg_lambda': 0.02014962736208268, 'FLAML_sample_size': 364083}}\n"
+ "{'Current Learner': 'lgbm', 'Current Sample': 10000, 'Current Hyper-parameters': {'n_estimators': 4, 'num_leaves': 14, 'min_child_samples': 15, 'learning_rate': 0.22841390623808822, 'log_max_bin': 9, 'colsample_bytree': 1.0, 'reg_alpha': 0.0014700173967242716, 'reg_lambda': 7.624911621832711, 'FLAML_sample_size': 10000}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 4, 'num_leaves': 14, 'min_child_samples': 15, 'learning_rate': 0.22841390623808822, 'log_max_bin': 9, 'colsample_bytree': 1.0, 'reg_alpha': 0.0014700173967242716, 'reg_lambda': 7.624911621832711, 'FLAML_sample_size': 10000}}\n",
+ "{'Current Learner': 'lgbm', 'Current Sample': 10000, 'Current Hyper-parameters': {'n_estimators': 4, 'num_leaves': 25, 'min_child_samples': 12, 'learning_rate': 0.5082200481556802, 'log_max_bin': 8, 'colsample_bytree': 0.9696263001275751, 'reg_alpha': 0.0028107036379524425, 'reg_lambda': 3.716898117989413, 'FLAML_sample_size': 10000}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 4, 'num_leaves': 25, 'min_child_samples': 12, 'learning_rate': 0.5082200481556802, 'log_max_bin': 8, 'colsample_bytree': 0.9696263001275751, 'reg_alpha': 0.0028107036379524425, 'reg_lambda': 3.716898117989413, 'FLAML_sample_size': 10000}}\n",
+ "{'Current Learner': 'lgbm', 'Current Sample': 10000, 'Current Hyper-parameters': {'n_estimators': 23, 'num_leaves': 14, 'min_child_samples': 15, 'learning_rate': 0.22841390623808822, 'log_max_bin': 9, 'colsample_bytree': 1.0, 'reg_alpha': 0.0014700173967242718, 'reg_lambda': 7.624911621832699, 'FLAML_sample_size': 10000}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 23, 'num_leaves': 14, 'min_child_samples': 15, 'learning_rate': 0.22841390623808822, 'log_max_bin': 9, 'colsample_bytree': 1.0, 'reg_alpha': 0.0014700173967242718, 'reg_lambda': 7.624911621832699, 'FLAML_sample_size': 10000}}\n",
+ "{'Current Learner': 'lgbm', 'Current Sample': 10000, 'Current Hyper-parameters': {'n_estimators': 101, 'num_leaves': 12, 'min_child_samples': 24, 'learning_rate': 0.07647794276357095, 'log_max_bin': 10, 'colsample_bytree': 1.0, 'reg_alpha': 0.001749539645587163, 'reg_lambda': 4.373760956394571, 'FLAML_sample_size': 10000}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 101, 'num_leaves': 12, 'min_child_samples': 24, 'learning_rate': 0.07647794276357095, 'log_max_bin': 10, 'colsample_bytree': 1.0, 'reg_alpha': 0.001749539645587163, 'reg_lambda': 4.373760956394571, 'FLAML_sample_size': 10000}}\n",
+ "{'Current Learner': 'lgbm', 'Current Sample': 40000, 'Current Hyper-parameters': {'n_estimators': 101, 'num_leaves': 12, 'min_child_samples': 24, 'learning_rate': 0.07647794276357095, 'log_max_bin': 10, 'colsample_bytree': 1.0, 'reg_alpha': 0.001749539645587163, 'reg_lambda': 4.373760956394571, 'FLAML_sample_size': 40000}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 101, 'num_leaves': 12, 'min_child_samples': 24, 'learning_rate': 0.07647794276357095, 'log_max_bin': 10, 'colsample_bytree': 1.0, 'reg_alpha': 0.001749539645587163, 'reg_lambda': 4.373760956394571, 'FLAML_sample_size': 40000}}\n",
+ "{'Current Learner': 'lgbm', 'Current Sample': 40000, 'Current Hyper-parameters': {'n_estimators': 361, 'num_leaves': 11, 'min_child_samples': 32, 'learning_rate': 0.13528717598813866, 'log_max_bin': 9, 'colsample_bytree': 0.9851977789068981, 'reg_alpha': 0.0038372002422749616, 'reg_lambda': 0.25113531892556773, 'FLAML_sample_size': 40000}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 361, 'num_leaves': 11, 'min_child_samples': 32, 'learning_rate': 0.13528717598813866, 'log_max_bin': 9, 'colsample_bytree': 0.9851977789068981, 'reg_alpha': 0.0038372002422749616, 'reg_lambda': 0.25113531892556773, 'FLAML_sample_size': 40000}}\n",
+ "{'Current Learner': 'lgbm', 'Current Sample': 364083, 'Current Hyper-parameters': {'n_estimators': 361, 'num_leaves': 11, 'min_child_samples': 32, 'learning_rate': 0.13528717598813866, 'log_max_bin': 9, 'colsample_bytree': 0.9851977789068981, 'reg_alpha': 0.0038372002422749616, 'reg_lambda': 0.25113531892556773, 'FLAML_sample_size': 364083}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 361, 'num_leaves': 11, 'min_child_samples': 32, 'learning_rate': 0.13528717598813866, 'log_max_bin': 9, 'colsample_bytree': 0.9851977789068981, 'reg_alpha': 0.0038372002422749616, 'reg_lambda': 0.25113531892556773, 'FLAML_sample_size': 364083}}\n",
+ "{'Current Learner': 'lgbm', 'Current Sample': 364083, 'Current Hyper-parameters': {'n_estimators': 654, 'num_leaves': 27, 'min_child_samples': 61, 'learning_rate': 0.0705835177602005, 'log_max_bin': 10, 'colsample_bytree': 0.8629551479851468, 'reg_alpha': 0.016562972790870267, 'reg_lambda': 0.25883390536609663, 'FLAML_sample_size': 364083}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 654, 'num_leaves': 27, 'min_child_samples': 61, 'learning_rate': 0.0705835177602005, 'log_max_bin': 10, 'colsample_bytree': 0.8629551479851468, 'reg_alpha': 0.016562972790870267, 'reg_lambda': 0.25883390536609663, 'FLAML_sample_size': 364083}}\n",
+ "{'Current Learner': 'lgbm', 'Current Sample': 364083, 'Current Hyper-parameters': {'n_estimators': 334, 'num_leaves': 50, 'min_child_samples': 101, 'learning_rate': 0.19240592731562936, 'log_max_bin': 9, 'colsample_bytree': 0.7263265270618353, 'reg_alpha': 0.042474252908075376, 'reg_lambda': 0.44574701224719, 'FLAML_sample_size': 364083}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 334, 'num_leaves': 50, 'min_child_samples': 101, 'learning_rate': 0.19240592731562936, 'log_max_bin': 9, 'colsample_bytree': 0.7263265270618353, 'reg_alpha': 0.042474252908075376, 'reg_lambda': 0.44574701224719, 'FLAML_sample_size': 364083}}\n"
]
}
],
@@ -637,11 +532,10 @@
{
"output_type": "display_data",
"data": {
+ "image/png": "iVBORw0KGgo... [base64-encoded matplotlib PNG of the notebook's plot output; the data is truncated in the source and omitted here]",
djSGH6aPtaVfv9r4T1N45gOtKfvaytwRA3FdiiwBTg8U1Yrsf0j8LP0b+FbwMFF/L55qg0zM8tVD11MZma2H5wgzMwslxOEmZnlcoIwM7NcThBmZpbLCcKGBUn/IunTme02SV/PbH9R0iX5R4Ok6ySdmz7/kaQ9FneXNErSNelMnQ9Jul/Su9N9TyhZN31f4971uv3sX5jOFtohqTcze+i5kpaV7v0YTJLGlWac7Wf/QZLuzdx0ZXXKCcKGi/uAtwFIOgAYA7wus/9twE9e5mt8jmQCttdHMjXFHOCVL/OcA4qIj0cyfchZwGORziAaEYsj4qxI7hofbJcA1w4Q0zaSa/0/UMBr2zDiBGHDxU9IptaAJDGsBZ6XdISkg4HXAg9JulzSinSe/EXpnaZ7JekVwMXAJyPiRYCI+FVE3JxT95L0/GvLWjV/Kmm1kjUrvpVz3OfSFkVDhTE9IWmMpFVa5x0AAALySURBVIlK1ku4TtKjkr4t6V2S7ktbOyel9Q9VstbI8nTyu9n9nPoPgdvTY16X1n84jX1SWqcV+KNK4rSRy01IGxYiYqOkHZKOJWkt3E8yW+XJJLNZromIbZL+NSKuBEj/Sb8H+F4FL3EC8GSUTU5YTtKbgItIphoX8KCke4BtwN8Db4uIZyQdWXbcApLWyEWxf3enngC8H/gwyfQxHyS58/1s4O9IWjufIZnm48Np19RySf8TES9k4jge2FpKgsBHgS9HxLfTqWhKyWst8Ob9iNNGELcgbDj5CUlyKCWI+zPb96V1TleyqtYakknzXpd3opfh7cCtEfFCRPyGZKK0U9PXuiUingGIiGczx3yWZLqGj+5ncoBkcrY1EbGTZMqMO9NzrSFZWwSSeZYuUzIl+o+AQ4Bjy84zjmSK7ZL7gb+T9LfAcRHRm8bfB2wrzeNk9ckJwoaT0jjENJJPuA+QtCDeBvxE0iHAvwHnRsQ0kn72Qyo893rgWEmjBz3q5BP/m8pbFfvoxczznZntnbzUEyDgDzPjGMdGRHaFO4BeMj+TiLiBpBXSCyyTdEam7sHA715GzDbMOUHYcPITki6jZyNZG+JZoIkkSfyEl/7xPaNkTYt+rx4qFxG/JZnJ9stpV0tpBtn3l1X9X2BOOpPmocD70rK7gPdLelV6bDYZ3A5cA3y/4E/kbcAnS+Mukmbk1HmUl1ocSHo1sCEivkIy++cb0vJXAc9EMp201SknCBtO1pBcvfRAWdmvI+KZ9Iqfa0laF20kn9z3xd+TdL90SFpLspRj+YJJD5GsKb6cZLW9r0fEqohYB1wF3CPpp8CXyo67JY1taTpdeRE+B4wCVktal27vJh2PeEzSCWnRecDatFvq9cD1afnpwPcLitOGCc/malZnJL0PeFNE/P0AdZYAl0XEo0MXmdUaX8VkVmci4tZSV1ietIut1cnB3IIwM7NcHoMwM7NcThBmZpbLCcLMzHI5QZiZWS4nCDMzy/X/AUdLzLp17HVCAAAAAElFTkSuQmCC",
"text/plain": [
""
- ],
- "image/svg+xml": "\n\n\n\n",
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYgAAAEWCAYAAAB8LwAVAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjAsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8GearUAAAgAElEQVR4nO3de5hddX3v8fcnk5CMxTBgAk2GS8IhicYGE40o4gVoMZFHSUREsBfFFrReji01lNRKLRwFm2qPfUy1wYMUFUXSOEZNiRQQWm5JIJjL0GBMkMwETYCMIowkmXzPH2vtsLJZM7MSZs2ePfvzep79zF6/dftO2Ozv/H7rd1FEYGZmVm1ErQMwM7OhyQnCzMxyOUGYmVkuJwgzM8vlBGFmZrmcIMzMLJcThNkhkPQmSZtqHYdZmZwgrO5IelTSH9Qyhoj4r4iYVtb1Jc2RdJekpyXtlHSnpHPKup9ZHicIsxySmmp47/OAm4EbgGOBY4ArgHccwrUkyf+f2yHxB8eGDUkjJF0u6WeSnpT0HUlHZfbfLOkXkn6V/nX+ysy+6yV9WdIKSc8AZ6Q1lU9IWpeec5OkMenxp0vqyJzf67Hp/sskPS5pu6Q/kxSSTsr5HQR8AbgqIr4aEb+KiH0RcWdEXJwe82lJ38icMym93sh0+8eSPiPpbuBZYIGkNVX3+UtJy9P3oyX9o6THJP1S0lckNb/I/xw2DDhB2HDyMWA+8BZgIrALWJzZ/x/AFOBo4EHgm1Xnvxf4DPBS4L/TsvOBucBk4GTg/X3cP/dYSXOBS4E/AE4CTu/jGtOA44ClfRxTxB8Dl5D8Ll8Bpkmaktn/XuDG9P01wFRgZhpfK0mNxRqcE4QNJx8CPhkRHRHxHPBp4LzKX9YRcV1EPJ3Z9ypJR2TO/15E3J3+xf7btOyfI2J7RDwFfJ/kS7Q3vR17PvC1iNgYEc+m9+7Ny9Kfjxf9pXtxfXq/vRHxK+B7wIUAaaJ4ObA8rbFcAvxlRDwVEU8DnwUueJH3t2HACcKGkxOA70rqktQFPAz0AMdIapJ0Tdr89Gvg0fSccZnzt+Vc8xeZ988Ch/dx/96OnVh17bz7VDyZ/pzQxzFFVN/jRtIEQVJ7aEuT1XjgJcADmX+3W9Jya3BOEDacbAPeFhEtmdeYiOgk+VKcR9LMcwQwKT1HmfPLmtr4cZKHzRXH9XHsJpLf4119HPMMyZd6xe/mHFP9u9wKjJc0kyRRVJqXngC6gVdm/s2OiIi+EqE1CCcIq1ejJI3JvEaStLV/RtIJAJLGS5qXHv9S4DmSv9BfQtKMMli+A1wk6RWSXgJ8qrcDI5l//1LgU5IukjQ2ffj+RklL0sMeAt4s6fi0iWxhfwFExB6SnlGLgKNIEgYRsQ+4FvgnSUcDSGqVNOeQf1sbNpwgrF6tIPnLt/L6NPBFYDnwI0lPA/cBr0uPvwH4OdAJtKf7BkVE/Afwz8AdwObMvZ/r5filwHuADwDbgV8C/4fkOQIRcStwE7AOeAD4QcFQbiSpQd0cEXsz5X9diSttfvtPkofl1uDkBYPMBpekVwAbgNFVX9RmQ4prEGaDQNI70/EGRwKfA77v5GBDnROE2eD4ILAD+BlJz6o/r204Zv1zE5OZmeVyDcLMzHKNrHUAA2XcuHExadKkWodhZlZXHnjggSciIndg5LBJEJMmTWLNmjX9H2hmZvtJ+nlv+9zEZGZmuZwgzMwslxOEmZnlcoIwM7NcThBmZpar1AQhaa6kTZI2S7q8l2POl9QuaaOkG9OyMyQ9lHn9VtL8MmM1M6s3bWs7Oe2a25l8+Q857ZrbaVvbOaDXL62ba7ro+2LgLKADWC1peUS0Z46ZQjJV8WkRsasy3XBE3EG6Gle6pvBm4EdlxWpmVm/a1naycNl6uvf0ANDZ1c3CZesBmD+rdUDuUWYN4hRgc0RsiYjdwLdJFmzJuhhYHBG7ACJiR851zgP+I139yszMgEUrN+1PDhXde3pYtHLTgN2jzATRyoHLHnakZVlTgamS7pZ0X7q4e7ULgG/l3UDSJZLWSFqzc+fOAQnazKwebO/qPqjyQ1Hrh9QjgSnA6STLIF4rqaWyU9IEYAawMu/kiFgSEbMjYvb48V5C18wax8SW5oMqPxRlJohODlx799i0LKsDWB4ReyJiK/AIScKoOB/4brpcopmZpRbMmUbzqKYDyppHNbFgzsAtBlhmglgNTJE0WdJhJE1Fy6uOaSOpPSBpHEmT05bM/gvppXnJzKyRzZ/VytXnzuCwpuRrvLWlmavPnTFgD6ihxF5MEbFX0kdJmoeagOsiYqOkK4E1EbE83fdWSe0ki6gsiIgnASRNIqmB3FlWjGZm9Wz+rFa+teoxAG764KkDfv1SZ3ONiBUki8tny67IvA/g0vRVfe6jvPChtpmZDZJaP6Q2M7MhygnCzMxyOUGYmVkuJwgzM8vlBGFmZrmcIMzMLJcThJmZ5XKCMDOzXE4QZmaWywnCzMxyOUGYmVkuJwgzM8vlBGFmZrmcIMzMLFep032bmdWLtrWdLFq5ie1d3UxsaWbBnGkDuvhOPXKCMLOG17a2k4XL1tO9pweAzq5uFi5bD9DQScIJwswa3qKVm/Ynh4ruPT1ctnTd/hXbhqr2x3/N9AljS7m2n0GYWcPb3tWdW767Z98gR3Lwpk8Yy7yZ5dRyXIMws4Y3saWZzpwk0drSXMpaz/XCNQgza3gL5kyjeVTTAWXNo5pYMGdajSIaGlyDMLOGV3kQfdnSdezu2UerezEBThBmZkCSJCoPpBu5WSnLTUxmZpbLCcLMzHI5QZiZWS4nCDMzy+UEYWZmuZwgzMwslxOEmZnlcoIwM7NcThBmZpbLCcLMzHKVmiAkzZW0SdJmSZf3csz5ktolbZR0Y6b8eEk/kvRwun9SmbGamdmBSpuLSVITsBg4C+gAVktaHhHtmWOmAAuB0yJil6SjM5e4AfhMRNwq6XBg6E/MbmY2jJRZgzgF2BwRWyJiN/BtYF7VMRcDiyNiF0BE7ACQNB0YGRG3puW/iYhnS4zVzMyqlJkgWoFtme2OtCxrKjBV0t2S7pM0N1PeJWmZpLWSFqU1EjMzGyS1fkg9EpgCnA5cCFwrqSUtfxPwCeC1wInA+6tPlnSJpDWS1uzcuXOwYjYzawhlJohO4LjM9rFpWVYHsDwi9kTEVuARkoTRATyUNk/tBdqAV1ffICKWRMTsiJg9fvz4Un4JM7NGVWaCWA1MkTRZ0mHABcDyqmPaSGoPSBpH0rS0JT23RVLlW/9MoB0zMxs0pSWI9C//jwIrgYeB70TERklXSjonPWwl8KSkduAOYEFEPBkRPSTNS7dJWg8IuLasWM3M7IVKXXI0IlYAK6rKrsi8D+DS9FV97q3AyWXGZ2Zmvav1Q2ozMxuinCDMzCyXE4SZmeVygjAzs1z9JghJLxuMQMzMbGgpUoO4T9LNks6WpNIjMjOzIaFIgpgKLAH+GPippM9KmlpuWGZmVmv9JohI3BoRF5LMvvo+YJWkOyW
dWnqEZmZWE/0OlEufQfwRSQ3il8DHSKbMmAncDEwuM0AzM6uNIiOp7wW+DsyPiI5M+RpJXyknLDMzq7UiCWJaOiXGC0TE5wY4HjPrRdvaThat3MT2rm4mtjSzYM405s+qXmLFbOAUeUj9o3SNBgAkHSlpZYkxmVmVtrWdLFy2ns6ubgLo7Opm4bL1tK2tnkHfbOAUqUGMj4iuykbO2tFmVrJFKzfRvafngLLuPT1ctnQd31r1WI2iGn7aH/810yeMrXUYQ0aRGkSPpOMrG5JOAHKbnMysHNu7unPLd/fsG+RIhrfpE8Yyb6ab7SqK1CA+Cfy3pDtJ1mV4E3BJqVGZlaRe2/EntjTTmZMkWluauemD7m1u5SgyDuIWkuU+bwK+DbwmIvwMwupOPbfjL5gzjeZRTQeUNY9qYsGcaTWKyBpB0QWDeoAdwBhguiQi4q7ywjIbePXejj+xZQxbdj5DkNQc6qX2Y/WryEC5PwM+DhwLPAS8nmRsxJnlhmY2sOq9HX/c4aMZd/ho5s1s5b2vO77/E8xepCI1iI8DrwXui4gzJL0c+Gy5YZkNPLfjmx2cIr2YfhsRvwWQNDoi/gdww6fVHbfjmx2cIjWIjnSgXBtwq6RdwM/LDcsaWVk9jSrXuGzpOnb37HM7vlk/+k0QEfHO9O2nJd0BHAHcUmpU1rAqPY0qD5MrPY2AAUsSlQfSblYy61ufCUJSE7AxIl4OEBF3DkpU1rAGo6eRR8uaFdNngoiIHkmbJB0fEUO/H6DlqqfBYYPR08ijZc2KKfIM4khgo6RVwDOVwog4p7SobMCU3WQz0NzTyGzoKJIgPlV6FFaaehscNmbUCEYI9mVm+3JPI7PaKPKQ2s8d6li9DQ4bd/hoALY91e2eRmY1VmQk9dM8P3vrYcAo4JmI8FO+OuAmGzM7VEUm63tpRIxNE0Iz8C7gX0qPzAaEB4eZ2aEqMpJ6v0i0AXNKiscG2PxZrVx97gwOa0r+U7e2NHP1uTPcZGNm/SrSxHRuZnMEMBv4bWkR2YDz4DAzOxRFejG9I/N+L/AoMK+UaMzMbMgo0ovposEIxMzMhpZ+n0FI+rd0sr7K9pGSritycUlz05HYmyVd3ssx50tql7RR0o2Z8h5JD6Wv5UXuZ2ZmA6dIE9PJEdFV2YiIXZJm9XdSOo/TYuAsoANYLWl5RLRnjpkCLAROS697dOYS3RExs+gvYmZmA6tIL6YRko6sbEg6imKJ5RRgc0RsiYjdJOtZVz+7uBhYHBG7ACJiR7GwzcysbEUSxOeBeyVdJekq4B7gHwqc1wpsy2x3pGVZU4Gpku6WdJ+kuZl9YyStScvn591A0iXpMWt27txZICQzMyuqyEPqGySt4fk1qM/NNhMNwP2nAKeTrHl9l6QZaZPWCRHRKelE4HZJ6yPiZ1WxLQGWAMyePTswM7MBU+Qh9euBbRHxpYj4EskKc68rcO1O4LjM9rFpWVYHsDwi9kTEVuARkoRBRHSmP7cAPwb6fe5hZmYDp0gT05eB32S2f5OW9Wc1MEXSZEmHARcA1b2R2khqD0gaR9LktCXtKTU6U34aMFC1FjMzK6DIw2ZFxP7mm4jYJ6lI09ReSR8FVgJNwHURsVHSlcCaiFie7nurpHagB1gQEU9KegPwr5L2kSSxawawWcvMzAookiC2SPrfPF9r+DCwpcjFI2IFsKKq7IrM+wAuTV/ZY+4BZhS5h5mZlaNIE9OHgDeQPD/oAF5H0j3VzMyGsSJNRTtInh8AIKkZeDtwc4lxDWv1tEa0mTWuQtN9S2qSdLakrwNbgfeUG9bwVVkjurOrm+D5NaLb1lZ38DIzq60+axCS3gK8FzgbWEXSm+jEiHh2EGIblmq1RnT7479m+gQvAmhmxfWaICR1AI+RPJz+REQ8LWmrk8OLU6s1oqdPGMu8mW7GMrPi+qpBLAXmkzQn9Uj6Hs+vTW2HyGtEm1m96PUZRET8BTCZZC6m04FNwPh0eu7DBye84cdrRJtZvejzGUQ6TuEO4A5Jo0jWor4Q+BdgXPnhDT+V3kqXLV3H7p59tLoXk5kNUUUGygEQEXuAHwA/SLu62iHyGtFmVg8KdXOtFhH5T1rNzGzYKFyDsEPjQXFmVq+cIEpUGRRXGfdQGRRnZlYP+k0QkqYCC4ATssdHxJm9nmRA34PiRo8a4YFrZjakFalB3Ax8BbiWZEpuK6ivQXGzjm/xwDUzG9KKJIi9EVFkgSCr4kFxZlbPivRi+r6kD0uaIOmoyqv0yIYBD4ozs3pWpAbxvvTngkxZACcOfDjDiwfFmVk9K7IexOTBCGS48qA4M6tXRXoxjQL+HHhzWvRj4F/TkdVmZjZMFWli+jIwimT+JYA/Tsv+rKygzMys9ookiNdGxKsy27dL+klZAZmZ2dBQpBdTj6T/VdmQdCIeD2FmNuwVqUEsIJnuewsgkhHVF5UalZmZ1VyRXky3SZoCVDrvb4qI58oNy8zMaq2vNanPjIjbJZ1bteskSUTEspJjMzOzGuqrBvEW4HbgHTn7AnCCMDMbxnpNEBHxd+nbKyNia3afJA+eMzMb5or0Yvr3nLKlAx2ImZkNLX09g3g58ErgiKrnEGOBMWUHZmZmtdXXM4hpwNuBFg58DvE0cHGZQZmZWe319Qzie8D3JJ0aEfcOYkxmZjYEFBkot1bSR0iam/Y3LUXEB0qLyszMaq7IQ+qvA78LzAHuBI4laWbql6S5kjZJ2izp8l6OOV9Su6SNkm6s2jdWUoekLxW5n5mZDZwiNYiTIuLdkuZFxL+lX+L/1d9JkpqAxcBZQAewWtLyiGjPHDMFWAicFhG7JB1ddZmrgLuK/jJmZjZwiiSIyroPXZJ+D/gFUP1FnucUYHNEbAGQ9G1gHtCeOeZiYHFE7AKIiB2VHZJeAxwD3ALMLnC/IaVtbSeLVm5ie1c3o5pGcNxRzbUOyczsoBRpYloi6UjgU8Byki/4fyhwXiuwLbPdkZZlTQWmSrpb0n2S5gJIGgF8HvhEgfsMOW1rO1m4bD2dXd0EsLtnH1ufeIa2tZ21Ds3MrLAik/V9NX17JwO/DvVIYApwOsmzjbskzQD+CFgRER2Sej1Z0iXAJQDHH3/8AId26Bat3ET3ngNnRN8XSbnXozazetHXQLlL+zoxIr7Qz7U7geMy28emZVkdwP3p8qVbJT1CkjBOBd4k6cPA4cBhkn4TEQc86I6IJcASgNmzZ0c/8Ry0bDPRxJZmFsyZVugLfntX90GVm5kNRX3VIF6a/pwGvJakeQmSQXOrClx7NTAlnbepE7gAeG/VMW3AhcDXJI0jaXLaEhF/WDlA0vuB2dXJoWyVZqJKTaCzq5uFy9YD9JskJrY005mTDCa2+DmEmdWPvgbK/T2ApLuAV0fE0+n2p4Ef9nfhiNgr6aPASqAJuC4iNkq6ElgTEcvTfW+V1E6ySt2CiHjyRf5OAyKvmah7Tw+XLV3Ht1Y91ue5Y0aNYISSZqWK5lFNLJgzrfeTzMyGmCK9mI4Bdme2d6dl/YqIFcCKqrIrMu8DuDR99XaN64Hri9xvIPXWHL
S7Z1+/5447fDQA257qZnfPPloPonnKzGyoKJIgbgBWSfpuuj2fGnxhD7bemolaW5q56YOn1iAiM7PB1W8314j4DMka1LvS10URcXXZgdXagjnTaB7VdECZm4nMrJH01YtpbET8WtJRwKPpq7LvqIh4qvzwaqfSHHTZ0nVuJjKzhtRXE9ONJNN9P0CyxGiF0u2BHhMx5Myf1br/gbSblcys0fTVi+nt6U8vL2pm1oD6amJ6dV8nRsSDAx+OmZkNFX01MX2+j30BnDnAsZiZ2RDSVxPTGYMZiJmZDS1FxkGQTvM9nQNXlLuhrKDMzKz2+k0Qkv6OZLbV6SSjot8G/DfJADozMxumitQgzgNeBayNiIskHQN8o9ywasuL/ZiZFVswqDsi9gF7JY0FdnDgNN7Dihf7MTNLFEkQayS1ANeSDJp7ELi31KhqqK/FfszMGklf4yAWAzdGxIfToq9IugUYGxHrBiW6GvBiP2Zmib5qEI8A/yjpUUn/IGlWRDw6nJMD9L6ojxf7MbNG02uCiIgvRsSpwFuAJ4HrJP2PpL+TNHXQIhxknsXVzCxRZLrvn0fE5yJiFsnyoPOBh0uPrEbmz2rl6nNncFhT8k/T2tLM1efO8CyuZtZwioyDGEky9uEC4PeBHwOfLjWqGsh2bZ3Y0sxxRzUz7vDRnsXVzBpWXw+pzyKpMZwNrAK+DVwSEc8MUmyDptK1tdJ7qbOrmxGqcVBmZjXWVw1iIcmaEH8VEbsGKZ6a6K1r67an3HPJzBpXX5P1Ncxsrb11Yd3ds2+QIzEzGzqKDJQb9nrrwtrqrq1m1sCcIHDXVjOzPIWm+x7uKl1YL1u6jt09+2htaWbBnGnu2mpmDc0JIjV/VivfWvUYgLu2mpnhJiYzM+uFE4SZmeVygjAzs1xOEGZmlssJwszMcjlBmJlZLicIMzPL5QRhZma5Sk0QkuZK2iRps6TLeznmfEntkjZKujEtO0HSg5IeSss/VGacZmb2QqWNpJbUBCwGzgI6gNWSlkdEe+aYKSTTip8WEbskHZ3uehw4NSKek3Q4sCE9d3tZ8ZqZ2YHKrEGcAmyOiC0RsZtkwaF5VcdcDCyurDcRETvSn7sj4rn0mNElx2lmZjnK/OJtBbZltjvSsqypwFRJd0u6T9Lcyg5Jx0lal17jc3m1B0mXSFojac3OnTtL+BXMzBpXrf8yHwlMAU4nWd70WkktABGxLSJOBk4C3ifpmOqTI2JJRMyOiNnjx48fxLDNzIa/MhNEJ3BcZvvYtCyrA1geEXsiYivwCEnC2C+tOWwA3lRirGZmVqXMBLEamCJpsqTDgAuA5VXHtJHUHpA0jqTJaYukYyU1p+VHAm8ENpUYq5mZVSktQUTEXuCjwErgYeA7EbFR0pWSzkkPWwk8KakduANYEBFPAq8A7pf0E+BO4B8jYn1ZsZqZ2QuVumBQRKwAVlSVXZF5H8Cl6St7zK3AyWXGZmZmfav1Q2ozMxuinCDMzCyXE4SZmeVygjAzs1xOEGZmlssJwszMcjlBmJlZLicIMzPL5QRhZma5nCDMzCyXE4SZmeVygjAzs1xOEGZmlssJwszMcjlBmJlZLicIMzPL5QRhZma5nCDMzCyXE4SZmeVygjAzs1xOEGZmlssJwszMcjlBmJlZrpG1DqDW2tZ2smjlJrZ3dTOqaQTHHdVc65DMzIaEhq5BtK3tZOGy9XR2dRPA7p59bH3iGdrWdtY6NDOzmmvoBLFo5Sa69/QcULYvknIzs0bX0Alie1f3QZWbmTWShk4QE1vynzf0Vm5m1kgaOkEsmDON5lFNB5Q1j2piwZxpNYrIzGzoaOheTPNntQLs78U0saWZBXOm7S83M2tkDZ0gIEkSTghmZi/U0E1MZmbWu1IThKS5kjZJ2izp8l6OOV9Su6SNkm5My2ZKujctWyfpPWXGaWZmL1RaE5OkJmAxcBbQAayWtDwi2jPHTAEWAqdFxC5JR6e7ngX+JCJ+Kmki8ICklRHRVVa8ZmZ2oDJrEKcAmyNiS0TsBr4NzKs65mJgcUTsAoiIHenPRyLip+n77cAOYHyJsZqZWZUyE0QrsC2z3ZGWZU0Fpkq6W9J9kuZWX0TSKcBhwM9Ki9TMzF6g1r2YRgJTgNOBY4G7JM2oNCVJmgB8HXhfROyrPlnSJcAl6eZvJPU3R8Y44IkBir0WHH9tOf7acvzlOKG3HWUmiE7guMz2sWlZVgdwf0TsAbZKeoQkYayWNBb4IfDJiLgv7wYRsQRYUjQgSWsiYvZB/A5DiuOvLcdfW45/8JXZxLQamCJpsqTDgAuA5VXHtJHUHpA0jqTJaUt6/HeBGyJiaYkxmplZL0pLEBGxF/gosBJ4GPhORGyUdKWkc9LDVgJPSmoH7gAWRMSTwPnAm4H3S3oofc0sK1YzM3uhUp9BRMQKYEVV2RWZ9wFcmr6yx3wD+EYJIRVujhqiHH9tOf7acvyDTMl3tJmZ2YE81YaZmeVygjAzs1wNkSCKzAk11Ei6TtIOSRsyZUdJulXST9OfR9Yyxt5IOk7SHZk5tj6eltdL/GMkrZL0kzT+v0/LJ0u6P/0c3ZT2thuyJDVJWivpB+l2vcX/qKT1aSeVNWlZXXyGACS1SFoq6X8kPSzp1HqKHxogQWTmhHobMB24UNL02kZVyPVA9cjyy4HbImIKcFu6PRTtBf4qIqYDrwc+kv6b10v8zwFnRsSrgJnAXEmvBz4H/FNEnATsAv60hjEW8XGSHoQV9RY/wBkRMTMzfqBePkMAXwRuiYiXA68i+W9RT/FDRAzrF3AqsDKzvRBYWOu4CsY+CdiQ2d4ETEjfTwA21TrGgr/H90gmbay7+IGXAA8CryMZBTsyLT/gczXUXiQDU28DzgR+AKie4k9jfBQYV1VWF58h4AhgK2lHoHqLv/Ia9jUIis0JVS+OiYjH0/e/AI6pZTBFSJoEzALup47iT5tnHiKZKPJWkrnAuiIZ3wND/3P0f4HLgMoUNS+jvuIHCOBHkh5Ip9WB+vkMTQZ2Al9Lm/m+Kul3qJ/4gQZoYhquIvkTZEj3UZZ0OPDvwF9ExK+z+4Z6/BHRExEzSf4SPwV4eY1DKkzS24EdEfFArWN5kd4YEa8maR7+iKQ3Z3cO8c/QSODVwJcjYhbwDFXNSUM8fqAxEkSROaHqxS/TCQwrExnuqHE8vZI0iiQ5fDMilqXFdRN/RSQTR95B0iTTIqkyuHQof45OA86R9CjJNPtnkrSH10v8AEREZ/pzB8nUO6dQP5+hDqAjIu5Pt5eSJIx6iR9ojARRZE6oerEceF/6/n0kbftDjiQB/w94OCK+kNlVL/GPl9SSvm8meX7yMEmiOC89bMjGHxELI+LYiJhE8nm/PSL+kDqJH0DS70h6aeU98FZgA3XyGYqIXwDbJE1Li34faKdO4t+v1g9BBuMFnA08QtKO/Mlax1Mw5m8BjwN7SP4a+VOSduTbgJ8C/wkcVes4e4n9jSRV53XAQ+nr7DqK/2RgbRr/BuCKtPxEYBWwGbgZGF3rWAv8LqcDP6i3+NNYf5K+Nlb+v62Xz1Aa6
0xgTfo5agOOrKf4I8JTbZiZWb5GaGIyM7ND4ARhZma5nCDMzCyXE4SZmeVygjAzs1xOEFYXJP2TpL/IbK+U9NXM9uclXZp/Nki6XtJ56fsfS3rB4vGSRkm6Jp1p80FJ90p6W7rv0XTd9IONe/99e9m/OJ2ttF1Sd2aJ3fMkraiMxxhIkiZUZnjtZf9hku7KDKqzBuUEYfXibuANAJJGAOOAV2b2vwG450Xe4yqSCdR+L5IpHuYDL32R1+xTRHwkkik9zgZ+FsnMpTMjYmlEnB3JSO6BdilwbR8x7Sbpq/+eEu5tdcQJwurFPSTTXUCSGDYAT0s6UtJo4BXAg5KukLRa0gZJS9JR3f2S9BLgYuBjEfEcQET8MiK+k3Pspen1N1TVav5E0rp0HYmv55x3VVqjaCoY06OSxkmalK4pcL2kRyR9U9IfSLo7re2ckh7/O0rWEVmVThA3r5dLvwu4JalJwD0AAALMSURBVD3nlenxD6WxT0mPaQP+sEicNny5Cml1ISK2S9or6XiS2sK9JLORngr8ClgfEbslfSkirgRIv6TfDny/wC1OAh6LqkkFq0l6DXARyfTfAu6XdCewG/hb4A0R8YSko6rOW0RSG7koDm106knAu4EPkEwf816SEevnAH9DUtv5JMm0Gh9Im6ZWSfrPiHgmE8dkYFclCQIfAr4YEd9Mp6KpJK8NwGsPIU4bRlyDsHpyD0lyqCSIezPbd6fHnKFk1bT1JJPUvTLvQi/CG4HvRsQzEfEbYBnwpvReN0fEEwAR8VTmnE8BR0TEhw4xOQBsjYj1EbGPZOqJ29JrrSdZNwSS+YouT6cp/zEwBji+6joTSKahrrgX+BtJfw2cEBHdafw9wO7KfEjWmJwgrJ5UnkPMIPkL9z6SGsQbgHskjQH+BTgvImaQtLOPKXjtzcDxksYOeNTJX/yvqa5VHKTnMu/3Zbb38XxLgIB3ZZ5jHB8R2RXlALrJ/JtExI0ktZBuYIWkMzPHjgZ++yJitjrnBGH15B6SJqOnIlmv4SmghSRJ3MPzX3xPpGtR9Np7qFpEPEsyA+0X06aWyqyu76469L+A+ZJeks4y+s607Hbg3ZJelp6bTQa3ANcAPyz5L/KVwMcqz10kzco55hGer3Eg6URgS0T8M8nMoien5S8DnoiIPSXGa0OcE4TVk/UkvZfuqyr7VUQ8kfb4uZakdrGS5C/3g/G3JM0v7ZI2kCzVWb3Q0YMk64WvIlkl76sRsTYiNgKfAe6U9BPgC1Xn3ZzGtjydQrwMVwGjgHWSNqbbB0ifR/xM0klp0fnAhrRZ6veAG9LyM4AflhSn1QnP5mrWYCS9E3hNRPxtH8csAy6PiEcGLzIbatyLyazBRMR3K01hedImtjYnB3MNwszMcvkZhJmZ5XKCMDOzXE4QZmaWywnCzMxyOUGYmVmu/w/M1Tvo81vfOQAAAABJRU5ErkJggg=="
+ ]
},
"metadata": {
"needs_background": "light"
@@ -771,7 +665,7 @@
"source": [
"print('default xgboost accuracy', '=', 1 - sklearn_metric_loss_score('accuracy', y_pred_xgb, y_test))\n",
"print('default lgbm accuracy', '=', 1 - sklearn_metric_loss_score('accuracy', y_pred_lgbm, y_test))\n",
- "print('flaml accuracy', '=', 1 - sklearn_metric_loss_score('accuracy', y_pred, y_test))"
+ "print('flaml (4min) accuracy', '=', 1 - sklearn_metric_loss_score('accuracy', y_pred, y_test))"
],
"outputs": [
{
@@ -780,7 +674,7 @@
"text": [
"default xgboost accuracy = 0.6676060098186078\n",
"default lgbm accuracy = 0.6602346380315323\n",
- "flaml accuracy = 0.6729231864497278\n"
+ "flaml (4min) accuracy = 0.6713287750470908\n"
]
}
],
@@ -840,37 +734,23 @@
"\n",
"\n",
"class MyRegularizedGreedyForest(SKLearnEstimator):\n",
- "\n",
- "\n",
- " def __init__(self, task='binary', n_jobs=1, **params):\n",
+ " def __init__(self, task='binary', **config):\n",
" '''Constructor\n",
" \n",
" Args:\n",
" task: A string of the task type, one of\n",
" 'binary', 'multi', 'regression'\n",
- " n_jobs: An integer of the number of parallel threads\n",
- " params: A dictionary of the hyperparameter names and values\n",
+ " config: A dictionary containing the hyperparameter names\n",
+ " and 'n_jobs' as keys. n_jobs is the number of parallel threads.\n",
" '''\n",
"\n",
- " super().__init__(task, **params)\n",
+ " super().__init__(task, **config)\n",
"\n",
- " '''task=regression for RGFRegressor; \n",
- " binary or multiclass for RGFClassifier'''\n",
- " if 'regression' in task:\n",
- " self.estimator_class = RGFRegressor\n",
- " else:\n",
+ " '''task=binary or multi for classification task'''\n",
+ " if task in (\"binary\", \"multi\"):\n",
" self.estimator_class = RGFClassifier\n",
- "\n",
- " # convert to int for integer hyperparameters\n",
- " self.params = {\n",
- " \"n_jobs\": n_jobs,\n",
- " 'max_leaf': int(params['max_leaf']),\n",
- " 'n_iter': int(params['n_iter']),\n",
- " 'n_tree_search': int(params['n_tree_search']),\n",
- " 'opt_interval': int(params['opt_interval']),\n",
- " 'learning_rate': params['learning_rate'],\n",
- " 'min_samples_leaf': int(params['min_samples_leaf'])\n",
- " } \n",
+ " else:\n",
+ " self.estimator_class = RGFRegressor\n",
"\n",
" @classmethod\n",
" def search_space(cls, data_size, task):\n",
@@ -879,9 +759,10 @@
" Returns:\n",
" A dictionary of the search space. \n",
" Each key is the name of a hyperparameter, and value is a dict with\n",
- " its domain and init_value (optional), cat_hp_cost (optional) \n",
- " e.g., \n",
- " {'domain': tune.randint(lower=1, upper=10), 'init_value': 1}\n",
+ " its domain (required) and low_cost_init_value, init_value,\n",
+ " cat_hp_cost (if applicable).\n",
+ " e.g.,\n",
+ " {'domain': tune.randint(lower=1, upper=10), 'init_value': 1}.\n",
" '''\n",
" space = { \n",
" 'max_leaf': {'domain': tune.lograndint(lower=4, upper=data_size), 'init_value': 4, 'low_cost_init_value': 4},\n",
@@ -963,7 +844,92 @@
"\n",
"automl.fit(X_train = X_train, y_train = y_train, **settings)"
],
- "outputs": [],
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stderr",
+ "text": [
+ "[flaml.automl: 10-08 15:17:57] {1458} INFO - Data split method: stratified\n",
+ "[flaml.automl: 10-08 15:17:57] {1462} INFO - Evaluation method: holdout\n",
+ "[flaml.automl: 10-08 15:17:57] {1510} INFO - Minimizing error metric: 1-accuracy\n",
+ "[flaml.automl: 10-08 15:17:57] {1547} INFO - List of ML learners in AutoML Run: ['RGF', 'lgbm', 'rf', 'xgboost']\n",
+ "[flaml.automl: 10-08 15:17:57] {1777} INFO - iteration 0, current learner RGF\n",
+ "/home/dmx/miniconda2/envs/test/lib/python3.8/site-packages/rgf/utils.py:224: UserWarning: Cannot find FastRGF executable files. FastRGF estimators will be unavailable for usage.\n",
+ " warnings.warn(\"Cannot find FastRGF executable files. \"\n",
+ "[flaml.automl: 10-08 15:17:59] {1894} INFO - Estimated sufficient time budget=718418s. Estimated necessary time budget=718s.\n",
+ "[flaml.automl: 10-08 15:17:59] {1966} INFO - at 2.8s,\testimator RGF's best error=0.3787,\tbest estimator RGF's best error=0.3787\n",
+ "[flaml.automl: 10-08 15:17:59] {1777} INFO - iteration 1, current learner RGF\n",
+ "[flaml.automl: 10-08 15:18:00] {1966} INFO - at 4.1s,\testimator RGF's best error=0.3787,\tbest estimator RGF's best error=0.3787\n",
+ "[flaml.automl: 10-08 15:18:00] {1777} INFO - iteration 2, current learner RGF\n",
+ "[flaml.automl: 10-08 15:18:02] {1966} INFO - at 5.2s,\testimator RGF's best error=0.3787,\tbest estimator RGF's best error=0.3787\n",
+ "[flaml.automl: 10-08 15:18:02] {1777} INFO - iteration 3, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:18:02] {1966} INFO - at 5.3s,\testimator lgbm's best error=0.3777,\tbest estimator lgbm's best error=0.3777\n",
+ "[flaml.automl: 10-08 15:18:02] {1777} INFO - iteration 4, current learner RGF\n",
+ "[flaml.automl: 10-08 15:18:03] {1966} INFO - at 6.5s,\testimator RGF's best error=0.3787,\tbest estimator lgbm's best error=0.3777\n",
+ "[flaml.automl: 10-08 15:18:03] {1777} INFO - iteration 5, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:18:03] {1966} INFO - at 6.6s,\testimator lgbm's best error=0.3777,\tbest estimator lgbm's best error=0.3777\n",
+ "[flaml.automl: 10-08 15:18:03] {1777} INFO - iteration 6, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:18:03] {1966} INFO - at 6.7s,\testimator lgbm's best error=0.3777,\tbest estimator lgbm's best error=0.3777\n",
+ "[flaml.automl: 10-08 15:18:03] {1777} INFO - iteration 7, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:18:03] {1966} INFO - at 6.8s,\testimator lgbm's best error=0.3777,\tbest estimator lgbm's best error=0.3777\n",
+ "[flaml.automl: 10-08 15:18:03] {1777} INFO - iteration 8, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:18:03] {1966} INFO - at 6.8s,\testimator lgbm's best error=0.3777,\tbest estimator lgbm's best error=0.3777\n",
+ "[flaml.automl: 10-08 15:18:03] {1777} INFO - iteration 9, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:18:03] {1966} INFO - at 6.9s,\testimator lgbm's best error=0.3777,\tbest estimator lgbm's best error=0.3777\n",
+ "[flaml.automl: 10-08 15:18:03] {1777} INFO - iteration 10, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:18:03] {1966} INFO - at 7.1s,\testimator lgbm's best error=0.3765,\tbest estimator lgbm's best error=0.3765\n",
+ "[flaml.automl: 10-08 15:18:03] {1777} INFO - iteration 11, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:18:04] {1966} INFO - at 7.3s,\testimator lgbm's best error=0.3765,\tbest estimator lgbm's best error=0.3765\n",
+ "[flaml.automl: 10-08 15:18:04] {1777} INFO - iteration 12, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:18:04] {1966} INFO - at 7.5s,\testimator lgbm's best error=0.3765,\tbest estimator lgbm's best error=0.3765\n",
+ "[flaml.automl: 10-08 15:18:04] {1777} INFO - iteration 13, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:18:04] {1966} INFO - at 7.7s,\testimator lgbm's best error=0.3750,\tbest estimator lgbm's best error=0.3750\n",
+ "[flaml.automl: 10-08 15:18:04] {1777} INFO - iteration 14, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:18:04] {1966} INFO - at 7.9s,\testimator lgbm's best error=0.3750,\tbest estimator lgbm's best error=0.3750\n",
+ "[flaml.automl: 10-08 15:18:04] {1777} INFO - iteration 15, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:18:04] {1966} INFO - at 8.1s,\testimator lgbm's best error=0.3604,\tbest estimator lgbm's best error=0.3604\n",
+ "[flaml.automl: 10-08 15:18:04] {1777} INFO - iteration 16, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:18:05] {1966} INFO - at 8.3s,\testimator lgbm's best error=0.3604,\tbest estimator lgbm's best error=0.3604\n",
+ "[flaml.automl: 10-08 15:18:05] {1777} INFO - iteration 17, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:18:05] {1966} INFO - at 8.6s,\testimator lgbm's best error=0.3604,\tbest estimator lgbm's best error=0.3604\n",
+ "[flaml.automl: 10-08 15:18:05] {1777} INFO - iteration 18, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:18:05] {1966} INFO - at 8.8s,\testimator lgbm's best error=0.3600,\tbest estimator lgbm's best error=0.3600\n",
+ "[flaml.automl: 10-08 15:18:05] {1777} INFO - iteration 19, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:18:05] {1966} INFO - at 8.9s,\testimator lgbm's best error=0.3600,\tbest estimator lgbm's best error=0.3600\n",
+ "[flaml.automl: 10-08 15:18:05] {1777} INFO - iteration 20, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:18:05] {1966} INFO - at 9.0s,\testimator lgbm's best error=0.3600,\tbest estimator lgbm's best error=0.3600\n",
+ "[flaml.automl: 10-08 15:18:05] {1777} INFO - iteration 21, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:18:05] {1966} INFO - at 9.2s,\testimator lgbm's best error=0.3600,\tbest estimator lgbm's best error=0.3600\n",
+ "[flaml.automl: 10-08 15:18:05] {1777} INFO - iteration 22, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:18:06] {1966} INFO - at 9.3s,\testimator lgbm's best error=0.3600,\tbest estimator lgbm's best error=0.3600\n",
+ "[flaml.automl: 10-08 15:18:06] {1777} INFO - iteration 23, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:18:06] {1966} INFO - at 9.4s,\testimator lgbm's best error=0.3600,\tbest estimator lgbm's best error=0.3600\n",
+ "[flaml.automl: 10-08 15:18:06] {1777} INFO - iteration 24, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:18:06] {1966} INFO - at 9.6s,\testimator lgbm's best error=0.3600,\tbest estimator lgbm's best error=0.3600\n",
+ "[flaml.automl: 10-08 15:18:06] {1777} INFO - iteration 25, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:18:06] {1966} INFO - at 9.7s,\testimator lgbm's best error=0.3600,\tbest estimator lgbm's best error=0.3600\n",
+ "[flaml.automl: 10-08 15:18:06] {1777} INFO - iteration 26, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:18:06] {1966} INFO - at 9.8s,\testimator lgbm's best error=0.3600,\tbest estimator lgbm's best error=0.3600\n",
+ "[flaml.automl: 10-08 15:18:06] {1777} INFO - iteration 27, current learner xgboost\n",
+ "[flaml.automl: 10-08 15:18:06] {1966} INFO - at 9.9s,\testimator xgboost's best error=0.3787,\tbest estimator lgbm's best error=0.3600\n",
+ "[flaml.automl: 10-08 15:18:06] {1777} INFO - iteration 28, current learner xgboost\n",
+ "[flaml.automl: 10-08 15:18:06] {1966} INFO - at 9.9s,\testimator xgboost's best error=0.3787,\tbest estimator lgbm's best error=0.3600\n",
+ "[flaml.automl: 10-08 15:18:06] {1777} INFO - iteration 29, current learner xgboost\n",
+ "[flaml.automl: 10-08 15:18:06] {1966} INFO - at 10.0s,\testimator xgboost's best error=0.3765,\tbest estimator lgbm's best error=0.3600\n",
+ "[flaml.automl: 10-08 15:18:06] {1777} INFO - iteration 30, current learner rf\n",
+ "[flaml.automl: 10-08 15:18:06] {1966} INFO - at 10.0s,\testimator rf's best error=0.3787,\tbest estimator lgbm's best error=0.3600\n",
+ "[flaml.automl: 10-08 15:18:06] {2073} INFO - selected model: LGBMClassifier(colsample_bytree=0.868332929662737,\n",
+ " learning_rate=0.5372172315260287, max_bin=255,\n",
+ " min_child_samples=24, n_estimators=4, num_leaves=23,\n",
+ " reg_alpha=0.006958608037974516, reg_lambda=0.07314321471228555,\n",
+ " verbose=-1)\n",
+ "[flaml.automl: 10-08 15:18:06] {2144} INFO - not retraining because the time budget is too small.\n",
+ "[flaml.automl: 10-08 15:18:06] {1571} INFO - fit succeeded\n",
+ "[flaml.automl: 10-08 15:18:06] {1572} INFO - Time taken to find the best model: 8.79496955871582\n",
+ "[flaml.automl: 10-08 15:18:06] {1583} WARNING - Time taken to find the best model is 88% of the provided time budget and not all estimators' hyperparameter search converged. Consider increasing the time budget.\n"
+ ]
+ }
+ ],
"metadata": {
"slideshow": {
"slide_type": "slide"
@@ -1034,104 +1000,121 @@
"output_type": "stream",
"name": "stderr",
"text": [
- "[flaml.automl: 08-31 00:58:32] {1279} INFO - Evaluation method: holdout\n",
- "[flaml.automl: 08-31 00:58:32] {1312} INFO - Minimizing error metric: customized metric\n",
- "[flaml.automl: 08-31 00:58:32] {1338} INFO - List of ML learners in AutoML Run: ['lgbm', 'rf', 'catboost', 'xgboost', 'extra_tree', 'lrl1']\n",
- "[flaml.automl: 08-31 00:58:32] {1532} INFO - iteration 0, current learner lgbm\n",
- "[flaml.automl: 08-31 00:58:32] {1689} INFO - at 1.3s,\tbest lgbm's error=0.6646,\tbest lgbm's error=0.6646\n",
- "[flaml.automl: 08-31 00:58:32] {1532} INFO - iteration 1, current learner lgbm\n",
- "[flaml.automl: 08-31 00:58:33] {1689} INFO - at 1.4s,\tbest lgbm's error=0.6646,\tbest lgbm's error=0.6646\n",
- "[flaml.automl: 08-31 00:58:33] {1532} INFO - iteration 2, current learner lgbm\n",
- "[flaml.automl: 08-31 00:58:33] {1689} INFO - at 1.6s,\tbest lgbm's error=0.6493,\tbest lgbm's error=0.6493\n",
- "[flaml.automl: 08-31 00:58:33] {1532} INFO - iteration 3, current learner xgboost\n",
- "[flaml.automl: 08-31 00:58:33] {1689} INFO - at 1.8s,\tbest xgboost's error=0.6672,\tbest lgbm's error=0.6493\n",
- "[flaml.automl: 08-31 00:58:33] {1532} INFO - iteration 4, current learner lgbm\n",
- "[flaml.automl: 08-31 00:58:33] {1689} INFO - at 1.9s,\tbest lgbm's error=0.6419,\tbest lgbm's error=0.6419\n",
- "[flaml.automl: 08-31 00:58:33] {1532} INFO - iteration 5, current learner lgbm\n",
- "[flaml.automl: 08-31 00:58:33] {1689} INFO - at 2.1s,\tbest lgbm's error=0.6419,\tbest lgbm's error=0.6419\n",
- "[flaml.automl: 08-31 00:58:33] {1532} INFO - iteration 6, current learner lgbm\n",
- "[flaml.automl: 08-31 00:58:33] {1689} INFO - at 2.3s,\tbest lgbm's error=0.6419,\tbest lgbm's error=0.6419\n",
- "[flaml.automl: 08-31 00:58:33] {1532} INFO - iteration 7, current learner lgbm\n",
- "[flaml.automl: 08-31 00:58:34] {1689} INFO - at 2.5s,\tbest lgbm's error=0.6375,\tbest lgbm's error=0.6375\n",
- "[flaml.automl: 08-31 00:58:34] {1532} INFO - iteration 8, current learner lgbm\n",
- "[flaml.automl: 08-31 00:58:34] {1689} INFO - at 2.6s,\tbest lgbm's error=0.6375,\tbest lgbm's error=0.6375\n",
- "[flaml.automl: 08-31 00:58:34] {1532} INFO - iteration 9, current learner xgboost\n",
- "[flaml.automl: 08-31 00:58:34] {1689} INFO - at 2.8s,\tbest xgboost's error=0.6672,\tbest lgbm's error=0.6375\n",
- "[flaml.automl: 08-31 00:58:34] {1532} INFO - iteration 10, current learner xgboost\n",
- "[flaml.automl: 08-31 00:58:34] {1689} INFO - at 2.9s,\tbest xgboost's error=0.6500,\tbest lgbm's error=0.6375\n",
- "[flaml.automl: 08-31 00:58:34] {1532} INFO - iteration 11, current learner extra_tree\n",
- "[flaml.automl: 08-31 00:58:34] {1689} INFO - at 3.1s,\tbest extra_tree's error=0.6607,\tbest lgbm's error=0.6375\n",
- "[flaml.automl: 08-31 00:58:34] {1532} INFO - iteration 12, current learner extra_tree\n",
- "[flaml.automl: 08-31 00:58:34] {1689} INFO - at 3.4s,\tbest extra_tree's error=0.6480,\tbest lgbm's error=0.6375\n",
- "[flaml.automl: 08-31 00:58:34] {1532} INFO - iteration 13, current learner rf\n",
- "[flaml.automl: 08-31 00:58:35] {1689} INFO - at 3.6s,\tbest rf's error=0.6483,\tbest lgbm's error=0.6375\n",
- "[flaml.automl: 08-31 00:58:35] {1532} INFO - iteration 14, current learner rf\n",
- "[flaml.automl: 08-31 00:58:35] {1689} INFO - at 3.8s,\tbest rf's error=0.6430,\tbest lgbm's error=0.6375\n",
- "[flaml.automl: 08-31 00:58:35] {1532} INFO - iteration 15, current learner extra_tree\n",
- "[flaml.automl: 08-31 00:58:35] {1689} INFO - at 4.0s,\tbest extra_tree's error=0.6480,\tbest lgbm's error=0.6375\n",
- "[flaml.automl: 08-31 00:58:35] {1532} INFO - iteration 16, current learner lgbm\n",
- "[flaml.automl: 08-31 00:58:35] {1689} INFO - at 4.2s,\tbest lgbm's error=0.6375,\tbest lgbm's error=0.6375\n",
- "[flaml.automl: 08-31 00:58:35] {1532} INFO - iteration 17, current learner extra_tree\n",
- "[flaml.automl: 08-31 00:58:36] {1689} INFO - at 4.4s,\tbest extra_tree's error=0.6480,\tbest lgbm's error=0.6375\n",
- "[flaml.automl: 08-31 00:58:36] {1532} INFO - iteration 18, current learner lgbm\n",
- "[flaml.automl: 08-31 00:58:36] {1689} INFO - at 4.5s,\tbest lgbm's error=0.6375,\tbest lgbm's error=0.6375\n",
- "[flaml.automl: 08-31 00:58:36] {1532} INFO - iteration 19, current learner xgboost\n",
- "[flaml.automl: 08-31 00:58:36] {1689} INFO - at 4.7s,\tbest xgboost's error=0.6413,\tbest lgbm's error=0.6375\n",
- "[flaml.automl: 08-31 00:58:36] {1532} INFO - iteration 20, current learner xgboost\n",
- "[flaml.automl: 08-31 00:58:36] {1689} INFO - at 4.9s,\tbest xgboost's error=0.6413,\tbest lgbm's error=0.6375\n",
- "[flaml.automl: 08-31 00:58:36] {1532} INFO - iteration 21, current learner xgboost\n",
- "[flaml.automl: 08-31 00:58:36] {1689} INFO - at 5.1s,\tbest xgboost's error=0.6413,\tbest lgbm's error=0.6375\n",
- "[flaml.automl: 08-31 00:58:36] {1532} INFO - iteration 22, current learner lgbm\n",
- "[flaml.automl: 08-31 00:58:36] {1689} INFO - at 5.2s,\tbest lgbm's error=0.6375,\tbest lgbm's error=0.6375\n",
- "[flaml.automl: 08-31 00:58:36] {1532} INFO - iteration 23, current learner lgbm\n",
- "[flaml.automl: 08-31 00:58:37] {1689} INFO - at 5.4s,\tbest lgbm's error=0.6375,\tbest lgbm's error=0.6375\n",
- "[flaml.automl: 08-31 00:58:37] {1532} INFO - iteration 24, current learner xgboost\n",
- "[flaml.automl: 08-31 00:58:37] {1689} INFO - at 5.6s,\tbest xgboost's error=0.6413,\tbest lgbm's error=0.6375\n",
- "[flaml.automl: 08-31 00:58:37] {1532} INFO - iteration 25, current learner extra_tree\n",
- "[flaml.automl: 08-31 00:58:37] {1689} INFO - at 5.8s,\tbest extra_tree's error=0.6428,\tbest lgbm's error=0.6375\n",
- "[flaml.automl: 08-31 00:58:37] {1532} INFO - iteration 26, current learner rf\n",
- "[flaml.automl: 08-31 00:58:37] {1689} INFO - at 6.1s,\tbest rf's error=0.6430,\tbest lgbm's error=0.6375\n",
- "[flaml.automl: 08-31 00:58:37] {1532} INFO - iteration 27, current learner xgboost\n",
- "[flaml.automl: 08-31 00:58:37] {1689} INFO - at 6.3s,\tbest xgboost's error=0.6413,\tbest lgbm's error=0.6375\n",
- "[flaml.automl: 08-31 00:58:37] {1532} INFO - iteration 28, current learner extra_tree\n",
- "[flaml.automl: 08-31 00:58:38] {1689} INFO - at 6.5s,\tbest extra_tree's error=0.6428,\tbest lgbm's error=0.6375\n",
- "[flaml.automl: 08-31 00:58:38] {1532} INFO - iteration 29, current learner extra_tree\n",
- "[flaml.automl: 08-31 00:58:38] {1689} INFO - at 6.8s,\tbest extra_tree's error=0.6428,\tbest lgbm's error=0.6375\n",
- "[flaml.automl: 08-31 00:58:38] {1532} INFO - iteration 30, current learner lgbm\n",
- "[flaml.automl: 08-31 00:58:38] {1689} INFO - at 7.1s,\tbest lgbm's error=0.6375,\tbest lgbm's error=0.6375\n",
- "[flaml.automl: 08-31 00:58:38] {1532} INFO - iteration 31, current learner lgbm\n",
- "[flaml.automl: 08-31 00:58:39] {1689} INFO - at 7.4s,\tbest lgbm's error=0.6375,\tbest lgbm's error=0.6375\n",
- "[flaml.automl: 08-31 00:58:39] {1532} INFO - iteration 32, current learner lgbm\n",
- "[flaml.automl: 08-31 00:58:39] {1689} INFO - at 7.8s,\tbest lgbm's error=0.6366,\tbest lgbm's error=0.6366\n",
- "[flaml.automl: 08-31 00:58:39] {1532} INFO - iteration 33, current learner extra_tree\n",
- "[flaml.automl: 08-31 00:58:39] {1689} INFO - at 7.9s,\tbest extra_tree's error=0.6428,\tbest lgbm's error=0.6366\n",
- "[flaml.automl: 08-31 00:58:39] {1532} INFO - iteration 34, current learner lgbm\n",
- "[flaml.automl: 08-31 00:58:39] {1689} INFO - at 8.3s,\tbest lgbm's error=0.6366,\tbest lgbm's error=0.6366\n",
- "[flaml.automl: 08-31 00:58:39] {1532} INFO - iteration 35, current learner rf\n",
- "[flaml.automl: 08-31 00:58:40] {1689} INFO - at 8.4s,\tbest rf's error=0.6430,\tbest lgbm's error=0.6366\n",
- "[flaml.automl: 08-31 00:58:40] {1532} INFO - iteration 36, current learner extra_tree\n",
- "[flaml.automl: 08-31 00:58:40] {1689} INFO - at 8.6s,\tbest extra_tree's error=0.6428,\tbest lgbm's error=0.6366\n",
- "[flaml.automl: 08-31 00:58:40] {1532} INFO - iteration 37, current learner xgboost\n",
- "[flaml.automl: 08-31 00:58:40] {1689} INFO - at 9.0s,\tbest xgboost's error=0.6393,\tbest lgbm's error=0.6366\n",
- "[flaml.automl: 08-31 00:58:40] {1532} INFO - iteration 38, current learner xgboost\n",
- "[flaml.automl: 08-31 00:58:40] {1689} INFO - at 9.2s,\tbest xgboost's error=0.6393,\tbest lgbm's error=0.6366\n",
- "[flaml.automl: 08-31 00:58:40] {1532} INFO - iteration 39, current learner lgbm\n",
- "[flaml.automl: 08-31 00:58:41] {1689} INFO - at 9.5s,\tbest lgbm's error=0.6366,\tbest lgbm's error=0.6366\n",
- "[flaml.automl: 08-31 00:58:41] {1532} INFO - iteration 40, current learner rf\n",
- "[flaml.automl: 08-31 00:58:41] {1689} INFO - at 9.6s,\tbest rf's error=0.6430,\tbest lgbm's error=0.6366\n",
- "[flaml.automl: 08-31 00:58:41] {1532} INFO - iteration 41, current learner extra_tree\n",
- "[flaml.automl: 08-31 00:58:41] {1689} INFO - at 9.8s,\tbest extra_tree's error=0.6428,\tbest lgbm's error=0.6366\n",
- "[flaml.automl: 08-31 00:58:41] {1532} INFO - iteration 42, current learner catboost\n",
- "[flaml.automl: 08-31 00:58:41] {1689} INFO - at 10.0s,\tbest catboost's error=0.6846,\tbest lgbm's error=0.6366\n",
- "[flaml.automl: 08-31 00:58:41] {1766} INFO - selected model: LGBMClassifier(colsample_bytree=0.9318698026248824,\n",
- " learning_rate=0.3731008541260285, max_bin=256,\n",
- " min_child_samples=18, n_estimators=4, num_leaves=19,\n",
- " objective='binary', reg_alpha=0.04956056407320672,\n",
- " reg_lambda=3.319594476328555, verbose=-1)\n",
- "[flaml.automl: 08-31 00:58:41] {1820} INFO - not retraining because the time budget is too small.\n",
- "[flaml.automl: 08-31 00:58:41] {1364} INFO - fit succeeded\n",
- "[flaml.automl: 08-31 00:58:41] {1365} INFO - Time taken to find the best model: 7.795238971710205\n",
- "[flaml.automl: 08-31 00:58:41] {1370} WARNING - Time taken to find the best model is 78% of the provided time budget and not all estimators' hyperparameter search converged. Consider increasing the time budget.\n"
+ "[flaml.automl: 10-08 15:18:07] {1458} INFO - Data split method: stratified\n",
+ "[flaml.automl: 10-08 15:18:07] {1462} INFO - Evaluation method: holdout\n",
+ "[flaml.automl: 10-08 15:18:07] {1510} INFO - Minimizing error metric: customized metric\n",
+ "[flaml.automl: 10-08 15:18:07] {1547} INFO - List of ML learners in AutoML Run: ['lgbm', 'rf', 'xgboost', 'extra_tree', 'lrl1']\n",
+ "[flaml.automl: 10-08 15:18:07] {1777} INFO - iteration 0, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:18:07] {1894} INFO - Estimated sufficient time budget=33595s. Estimated necessary time budget=559s.\n",
+ "[flaml.automl: 10-08 15:18:07] {1966} INFO - at 1.0s,\testimator lgbm's best error=0.6647,\tbest estimator lgbm's best error=0.6647\n",
+ "[flaml.automl: 10-08 15:18:07] {1777} INFO - iteration 1, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:18:08] {1966} INFO - at 1.1s,\testimator lgbm's best error=0.6647,\tbest estimator lgbm's best error=0.6647\n",
+ "[flaml.automl: 10-08 15:18:08] {1777} INFO - iteration 2, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:18:08] {1966} INFO - at 1.2s,\testimator lgbm's best error=0.6491,\tbest estimator lgbm's best error=0.6491\n",
+ "[flaml.automl: 10-08 15:18:08] {1777} INFO - iteration 3, current learner xgboost\n",
+ "[flaml.automl: 10-08 15:18:08] {1966} INFO - at 1.2s,\testimator xgboost's best error=0.6672,\tbest estimator lgbm's best error=0.6491\n",
+ "[flaml.automl: 10-08 15:18:08] {1777} INFO - iteration 4, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:18:08] {1966} INFO - at 1.4s,\testimator lgbm's best error=0.6423,\tbest estimator lgbm's best error=0.6423\n",
+ "[flaml.automl: 10-08 15:18:08] {1777} INFO - iteration 5, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:18:08] {1966} INFO - at 1.5s,\testimator lgbm's best error=0.6423,\tbest estimator lgbm's best error=0.6423\n",
+ "[flaml.automl: 10-08 15:18:08] {1777} INFO - iteration 6, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:18:08] {1966} INFO - at 1.6s,\testimator lgbm's best error=0.6400,\tbest estimator lgbm's best error=0.6400\n",
+ "[flaml.automl: 10-08 15:18:08] {1777} INFO - iteration 7, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:18:08] {1966} INFO - at 1.7s,\testimator lgbm's best error=0.6400,\tbest estimator lgbm's best error=0.6400\n",
+ "[flaml.automl: 10-08 15:18:08] {1777} INFO - iteration 8, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:18:08] {1966} INFO - at 1.9s,\testimator lgbm's best error=0.6400,\tbest estimator lgbm's best error=0.6400\n",
+ "[flaml.automl: 10-08 15:18:08] {1777} INFO - iteration 9, current learner xgboost\n",
+ "[flaml.automl: 10-08 15:18:08] {1966} INFO - at 2.0s,\testimator xgboost's best error=0.6672,\tbest estimator lgbm's best error=0.6400\n",
+ "[flaml.automl: 10-08 15:18:08] {1777} INFO - iteration 10, current learner xgboost\n",
+ "[flaml.automl: 10-08 15:18:09] {1966} INFO - at 2.1s,\testimator xgboost's best error=0.6500,\tbest estimator lgbm's best error=0.6400\n",
+ "[flaml.automl: 10-08 15:18:09] {1777} INFO - iteration 11, current learner extra_tree\n",
+ "[flaml.automl: 10-08 15:18:09] {1966} INFO - at 2.3s,\testimator extra_tree's best error=0.6536,\tbest estimator lgbm's best error=0.6400\n",
+ "[flaml.automl: 10-08 15:18:09] {1777} INFO - iteration 12, current learner xgboost\n",
+ "[flaml.automl: 10-08 15:18:09] {1966} INFO - at 2.4s,\testimator xgboost's best error=0.6413,\tbest estimator lgbm's best error=0.6400\n",
+ "[flaml.automl: 10-08 15:18:09] {1777} INFO - iteration 13, current learner xgboost\n",
+ "[flaml.automl: 10-08 15:18:09] {1966} INFO - at 2.5s,\testimator xgboost's best error=0.6413,\tbest estimator lgbm's best error=0.6400\n",
+ "[flaml.automl: 10-08 15:18:09] {1777} INFO - iteration 14, current learner xgboost\n",
+ "[flaml.automl: 10-08 15:18:09] {1966} INFO - at 2.6s,\testimator xgboost's best error=0.6413,\tbest estimator lgbm's best error=0.6400\n",
+ "[flaml.automl: 10-08 15:18:09] {1777} INFO - iteration 15, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:18:09] {1966} INFO - at 2.8s,\testimator lgbm's best error=0.6400,\tbest estimator lgbm's best error=0.6400\n",
+ "[flaml.automl: 10-08 15:18:09] {1777} INFO - iteration 16, current learner extra_tree\n",
+ "[flaml.automl: 10-08 15:18:09] {1966} INFO - at 3.0s,\testimator extra_tree's best error=0.6446,\tbest estimator lgbm's best error=0.6400\n",
+ "[flaml.automl: 10-08 15:18:09] {1777} INFO - iteration 17, current learner rf\n",
+ "[flaml.automl: 10-08 15:18:10] {1966} INFO - at 3.2s,\testimator rf's best error=0.6470,\tbest estimator lgbm's best error=0.6400\n",
+ "[flaml.automl: 10-08 15:18:10] {1777} INFO - iteration 18, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:18:10] {1966} INFO - at 3.4s,\testimator lgbm's best error=0.6400,\tbest estimator lgbm's best error=0.6400\n",
+ "[flaml.automl: 10-08 15:18:10] {1777} INFO - iteration 19, current learner xgboost\n",
+ "[flaml.automl: 10-08 15:18:10] {1966} INFO - at 3.6s,\testimator xgboost's best error=0.6413,\tbest estimator lgbm's best error=0.6400\n",
+ "[flaml.automl: 10-08 15:18:10] {1777} INFO - iteration 20, current learner rf\n",
+ "[flaml.automl: 10-08 15:18:10] {1966} INFO - at 3.8s,\testimator rf's best error=0.6411,\tbest estimator lgbm's best error=0.6400\n",
+ "[flaml.automl: 10-08 15:18:10] {1777} INFO - iteration 21, current learner xgboost\n",
+ "[flaml.automl: 10-08 15:18:10] {1966} INFO - at 4.0s,\testimator xgboost's best error=0.6413,\tbest estimator lgbm's best error=0.6400\n",
+ "[flaml.automl: 10-08 15:18:10] {1777} INFO - iteration 22, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:18:11] {1966} INFO - at 4.4s,\testimator lgbm's best error=0.6358,\tbest estimator lgbm's best error=0.6358\n",
+ "[flaml.automl: 10-08 15:18:11] {1777} INFO - iteration 23, current learner rf\n",
+ "[flaml.automl: 10-08 15:18:11] {1966} INFO - at 4.6s,\testimator rf's best error=0.6411,\tbest estimator lgbm's best error=0.6358\n",
+ "[flaml.automl: 10-08 15:18:11] {1777} INFO - iteration 24, current learner extra_tree\n",
+ "[flaml.automl: 10-08 15:18:11] {1966} INFO - at 4.7s,\testimator extra_tree's best error=0.6446,\tbest estimator lgbm's best error=0.6358\n",
+ "[flaml.automl: 10-08 15:18:11] {1777} INFO - iteration 25, current learner extra_tree\n",
+ "[flaml.automl: 10-08 15:18:11] {1966} INFO - at 4.9s,\testimator extra_tree's best error=0.6446,\tbest estimator lgbm's best error=0.6358\n",
+ "[flaml.automl: 10-08 15:18:11] {1777} INFO - iteration 26, current learner rf\n",
+ "[flaml.automl: 10-08 15:18:12] {1966} INFO - at 5.1s,\testimator rf's best error=0.6411,\tbest estimator lgbm's best error=0.6358\n",
+ "[flaml.automl: 10-08 15:18:12] {1777} INFO - iteration 27, current learner xgboost\n",
+ "[flaml.automl: 10-08 15:18:12] {1966} INFO - at 5.3s,\testimator xgboost's best error=0.6393,\tbest estimator lgbm's best error=0.6358\n",
+ "[flaml.automl: 10-08 15:18:12] {1777} INFO - iteration 28, current learner extra_tree\n",
+ "[flaml.automl: 10-08 15:18:12] {1966} INFO - at 5.4s,\testimator extra_tree's best error=0.6436,\tbest estimator lgbm's best error=0.6358\n",
+ "[flaml.automl: 10-08 15:18:12] {1777} INFO - iteration 29, current learner xgboost\n",
+ "[flaml.automl: 10-08 15:18:12] {1966} INFO - at 5.7s,\testimator xgboost's best error=0.6342,\tbest estimator xgboost's best error=0.6342\n",
+ "[flaml.automl: 10-08 15:18:12] {1777} INFO - iteration 30, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:18:12] {1966} INFO - at 6.0s,\testimator lgbm's best error=0.6351,\tbest estimator xgboost's best error=0.6342\n",
+ "[flaml.automl: 10-08 15:18:12] {1777} INFO - iteration 31, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:18:13] {1966} INFO - at 6.3s,\testimator lgbm's best error=0.6351,\tbest estimator xgboost's best error=0.6342\n",
+ "[flaml.automl: 10-08 15:18:13] {1777} INFO - iteration 32, current learner rf\n",
+ "[flaml.automl: 10-08 15:18:13] {1966} INFO - at 6.4s,\testimator rf's best error=0.6411,\tbest estimator xgboost's best error=0.6342\n",
+ "[flaml.automl: 10-08 15:18:13] {1777} INFO - iteration 33, current learner xgboost\n",
+ "[flaml.automl: 10-08 15:18:13] {1966} INFO - at 6.7s,\testimator xgboost's best error=0.6342,\tbest estimator xgboost's best error=0.6342\n",
+ "[flaml.automl: 10-08 15:18:13] {1777} INFO - iteration 34, current learner lgbm\n",
+ "[flaml.automl: 10-08 15:18:13] {1966} INFO - at 6.9s,\testimator lgbm's best error=0.6351,\tbest estimator xgboost's best error=0.6342\n",
+ "[flaml.automl: 10-08 15:18:13] {1777} INFO - iteration 35, current learner xgboost\n",
+ "[flaml.automl: 10-08 15:18:14] {1966} INFO - at 7.1s,\testimator xgboost's best error=0.6342,\tbest estimator xgboost's best error=0.6342\n",
+ "[flaml.automl: 10-08 15:18:14] {1777} INFO - iteration 36, current learner xgboost\n",
+ "[flaml.automl: 10-08 15:18:14] {1966} INFO - at 7.3s,\testimator xgboost's best error=0.6330,\tbest estimator xgboost's best error=0.6330\n",
+ "[flaml.automl: 10-08 15:18:14] {1777} INFO - iteration 37, current learner xgboost\n",
+ "[flaml.automl: 10-08 15:18:14] {1966} INFO - at 7.5s,\testimator xgboost's best error=0.6330,\tbest estimator xgboost's best error=0.6330\n",
+ "[flaml.automl: 10-08 15:18:14] {1777} INFO - iteration 38, current learner xgboost\n",
+ "[flaml.automl: 10-08 15:18:14] {1966} INFO - at 7.8s,\testimator xgboost's best error=0.6330,\tbest estimator xgboost's best error=0.6330\n",
+ "[flaml.automl: 10-08 15:18:14] {1777} INFO - iteration 39, current learner xgboost\n",
+ "[flaml.automl: 10-08 15:18:14] {1966} INFO - at 8.0s,\testimator xgboost's best error=0.6330,\tbest estimator xgboost's best error=0.6330\n",
+ "[flaml.automl: 10-08 15:18:14] {1777} INFO - iteration 40, current learner xgboost\n",
+ "[flaml.automl: 10-08 15:18:15] {1966} INFO - at 8.2s,\testimator xgboost's best error=0.6330,\tbest estimator xgboost's best error=0.6330\n",
+ "[flaml.automl: 10-08 15:18:15] {1777} INFO - iteration 41, current learner xgboost\n",
+ "[flaml.automl: 10-08 15:18:17] {1966} INFO - at 10.1s,\testimator xgboost's best error=0.6290,\tbest estimator xgboost's best error=0.6290\n",
+ "[flaml.automl: 10-08 15:18:17] {2073} INFO - selected model: XGBClassifier(base_score=0.5, booster='gbtree', colsample_bylevel=1.0,\n",
+ " colsample_bynode=1, colsample_bytree=0.7942569492674472, gamma=0,\n",
+ " gpu_id=-1, grow_policy='lossguide', importance_type='gain',\n",
+ " interaction_constraints='', learning_rate=0.6413547778096401,\n",
+ " max_delta_step=0, max_depth=0, max_leaves=17,\n",
+ " min_child_weight=13.753540541999772, missing=nan,\n",
+ " monotone_constraints='()', n_estimators=4, n_jobs=-1,\n",
+ " num_parallel_tree=1, random_state=0,\n",
+ " reg_alpha=0.016714365103792518, reg_lambda=0.4874780682949813,\n",
+ " scale_pos_weight=1, subsample=1.0, tree_method='hist',\n",
+ " use_label_encoder=False, validate_parameters=1, verbosity=0)\n",
+ "[flaml.automl: 10-08 15:18:18] {2136} INFO - retrain xgboost for 1.8s\n",
+ "[flaml.automl: 10-08 15:18:18] {2142} INFO - retrained model: XGBClassifier(base_score=0.5, booster='gbtree', colsample_bylevel=1.0,\n",
+ " colsample_bynode=1, colsample_bytree=0.7942569492674472, gamma=0,\n",
+ " gpu_id=-1, grow_policy='lossguide', importance_type='gain',\n",
+ " interaction_constraints='', learning_rate=0.6413547778096401,\n",
+ " max_delta_step=0, max_depth=0, max_leaves=17,\n",
+ " min_child_weight=13.753540541999772, missing=nan,\n",
+ " monotone_constraints='()', n_estimators=4, n_jobs=-1,\n",
+ " num_parallel_tree=1, random_state=0,\n",
+ " reg_alpha=0.016714365103792518, reg_lambda=0.4874780682949813,\n",
+ " scale_pos_weight=1, subsample=1.0, tree_method='hist',\n",
+ " use_label_encoder=False, validate_parameters=1, verbosity=0)\n",
+ "[flaml.automl: 10-08 15:18:18] {1571} INFO - fit succeeded\n",
+ "[flaml.automl: 10-08 15:18:18] {1572} INFO - Time taken to find the best model: 10.063513994216919\n",
+ "[flaml.automl: 10-08 15:18:18] {1583} WARNING - Time taken to find the best model is 101% of the provided time budget and not all estimators' hyperparameter search converged. Consider increasing the time budget.\n"
]
}
],
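The run above minimizes a "customized metric". As a reference, here is a hedged sketch of the kind of callable FLAML accepts for this: it must return the value to minimize plus a dict of metrics to log. The exact keyword arguments FLAML passes have varied across versions, and the train/validation trade-off below is one plausible form, not necessarily this notebook's exact metric:

```python
import time
from sklearn.metrics import log_loss

def custom_metric(X_val, y_val, estimator, labels,
                  X_train, y_train, weight_val=None, weight_train=None,
                  *args, **kwargs):
    # FLAML minimizes the first return value; the dict is logged alongside it.
    start = time.time()
    y_pred = estimator.predict_proba(X_val)
    pred_time = (time.time() - start) / len(X_val)
    val_loss = log_loss(y_val, y_pred, labels=labels, sample_weight=weight_val)
    train_loss = log_loss(y_train, estimator.predict_proba(X_train),
                          labels=labels, sample_weight=weight_train)
    alpha = 0.5  # penalize a large train/validation gap (overfitting)
    return val_loss * (1 + alpha) - alpha * train_loss, {
        "val_loss": val_loss,
        "train_loss": train_loss,
        "pred_time": pred_time,
    }

# Passed via the `metric` argument, e.g.
# automl.fit(X_train=X_train, y_train=y_train, metric=custom_metric, **settings)
```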
@@ -1140,11 +1123,11 @@
],
"metadata": {
"interpreter": {
- "hash": "0cfea3304185a9579d09e0953576b57c8581e46e6ebc6dfeb681bc5a511f7544"
+ "hash": "ea9f131eb1b7663628f6445553ba215a834e2f0b4d18774746f0f47938ce4671"
},
"kernelspec": {
"name": "python3",
- "display_name": "Python 3.8.0 64-bit ('blend': conda)"
+ "display_name": "Python 3.8.0 64-bit ('test': conda)"
},
"language_info": {
"codemirror_mode": {
diff --git a/notebook/flaml_lightgbm.ipynb b/notebook/flaml_lightgbm.ipynb
index e92f1da0e..529d20bf0 100644
--- a/notebook/flaml_lightgbm.ipynb
+++ b/notebook/flaml_lightgbm.ipynb
@@ -3,7 +3,7 @@
{
"cell_type": "markdown",
"source": [
- "Copyright (c) 2020-2021 Microsoft Corporation. All rights reserved. \n",
+ "Copyright (c) Microsoft Corporation. All rights reserved. \n",
"\n",
"Licensed under the MIT License.\n",
"\n",
@@ -142,112 +142,76 @@
"output_type": "stream",
"name": "stderr",
"text": [
- "[flaml.automl: 08-22 21:09:17] {1130} INFO - Evaluation method: cv\n",
- "[flaml.automl: 08-22 21:09:17] {634} INFO - Using RepeatedKFold\n",
- "[flaml.automl: 08-22 21:09:17] {1155} INFO - Minimizing error metric: 1-r2\n",
- "[flaml.automl: 08-22 21:09:17] {1175} INFO - List of ML learners in AutoML Run: ['lgbm']\n",
- "[flaml.automl: 08-22 21:09:17] {1358} INFO - iteration 0, current learner lgbm\n",
- "[flaml.automl: 08-22 21:09:18] {1515} INFO - at 0.5s,\tbest lgbm's error=0.7385,\tbest lgbm's error=0.7385\n",
- "[flaml.automl: 08-22 21:09:18] {1358} INFO - iteration 1, current learner lgbm\n",
- "[flaml.automl: 08-22 21:09:18] {1515} INFO - at 0.7s,\tbest lgbm's error=0.7385,\tbest lgbm's error=0.7385\n",
- "[flaml.automl: 08-22 21:09:18] {1358} INFO - iteration 2, current learner lgbm\n",
- "[flaml.automl: 08-22 21:09:18] {1515} INFO - at 0.8s,\tbest lgbm's error=0.5517,\tbest lgbm's error=0.5517\n",
- "[flaml.automl: 08-22 21:09:18] {1358} INFO - iteration 3, current learner lgbm\n",
- "[flaml.automl: 08-22 21:09:18] {1515} INFO - at 1.0s,\tbest lgbm's error=0.3103,\tbest lgbm's error=0.3103\n",
- "[flaml.automl: 08-22 21:09:18] {1358} INFO - iteration 4, current learner lgbm\n",
- "[flaml.automl: 08-22 21:09:19] {1515} INFO - at 1.1s,\tbest lgbm's error=0.3103,\tbest lgbm's error=0.3103\n",
- "[flaml.automl: 08-22 21:09:19] {1358} INFO - iteration 5, current learner lgbm\n",
- "[flaml.automl: 08-22 21:09:19] {1515} INFO - at 1.4s,\tbest lgbm's error=0.2718,\tbest lgbm's error=0.2718\n",
- "[flaml.automl: 08-22 21:09:19] {1358} INFO - iteration 6, current learner lgbm\n",
- "[flaml.automl: 08-22 21:09:19] {1515} INFO - at 1.6s,\tbest lgbm's error=0.2718,\tbest lgbm's error=0.2718\n",
- "[flaml.automl: 08-22 21:09:19] {1358} INFO - iteration 7, current learner lgbm\n",
- "[flaml.automl: 08-22 21:09:19] {1515} INFO - at 1.8s,\tbest lgbm's error=0.2718,\tbest lgbm's error=0.2718\n",
- "[flaml.automl: 08-22 21:09:19] {1358} INFO - iteration 8, current learner lgbm\n",
- "[flaml.automl: 08-22 21:09:19] {1515} INFO - at 2.0s,\tbest lgbm's error=0.2406,\tbest lgbm's error=0.2406\n",
- "[flaml.automl: 08-22 21:09:19] {1358} INFO - iteration 9, current learner lgbm\n",
- "[flaml.automl: 08-22 21:09:20] {1515} INFO - at 2.2s,\tbest lgbm's error=0.2406,\tbest lgbm's error=0.2406\n",
- "[flaml.automl: 08-22 21:09:20] {1358} INFO - iteration 10, current learner lgbm\n",
- "[flaml.automl: 08-22 21:09:20] {1515} INFO - at 2.8s,\tbest lgbm's error=0.1787,\tbest lgbm's error=0.1787\n",
- "[flaml.automl: 08-22 21:09:20] {1358} INFO - iteration 11, current learner lgbm\n",
- "[flaml.automl: 08-22 21:09:21] {1515} INFO - at 3.7s,\tbest lgbm's error=0.1787,\tbest lgbm's error=0.1787\n",
- "[flaml.automl: 08-22 21:09:21] {1358} INFO - iteration 12, current learner lgbm\n",
- "[flaml.automl: 08-22 21:09:22] {1515} INFO - at 4.3s,\tbest lgbm's error=0.1787,\tbest lgbm's error=0.1787\n",
- "[flaml.automl: 08-22 21:09:22] {1358} INFO - iteration 13, current learner lgbm\n",
- "[flaml.automl: 08-22 21:09:22] {1515} INFO - at 4.9s,\tbest lgbm's error=0.1787,\tbest lgbm's error=0.1787\n",
- "[flaml.automl: 08-22 21:09:22] {1358} INFO - iteration 14, current learner lgbm\n",
- "[flaml.automl: 08-22 21:09:23] {1515} INFO - at 5.8s,\tbest lgbm's error=0.1765,\tbest lgbm's error=0.1765\n",
- "[flaml.automl: 08-22 21:09:23] {1358} INFO - iteration 15, current learner lgbm\n",
- "[flaml.automl: 08-22 21:09:25] {1515} INFO - at 7.6s,\tbest lgbm's error=0.1765,\tbest lgbm's error=0.1765\n",
- "[flaml.automl: 08-22 21:09:25] {1358} INFO - iteration 16, current learner lgbm\n",
- "[flaml.automl: 08-22 21:09:26] {1515} INFO - at 8.3s,\tbest lgbm's error=0.1765,\tbest lgbm's error=0.1765\n",
- "[flaml.automl: 08-22 21:09:26] {1358} INFO - iteration 17, current learner lgbm\n",
- "[flaml.automl: 08-22 21:09:28] {1515} INFO - at 10.3s,\tbest lgbm's error=0.1765,\tbest lgbm's error=0.1765\n",
- "[flaml.automl: 08-22 21:09:28] {1358} INFO - iteration 18, current learner lgbm\n",
- "[flaml.automl: 08-22 21:09:28] {1515} INFO - at 11.0s,\tbest lgbm's error=0.1765,\tbest lgbm's error=0.1765\n",
- "[flaml.automl: 08-22 21:09:28] {1358} INFO - iteration 19, current learner lgbm\n",
- "[flaml.automl: 08-22 21:09:29] {1515} INFO - at 11.4s,\tbest lgbm's error=0.1765,\tbest lgbm's error=0.1765\n",
- "[flaml.automl: 08-22 21:09:29] {1358} INFO - iteration 20, current learner lgbm\n",
- "[flaml.automl: 08-22 21:09:31] {1515} INFO - at 13.9s,\tbest lgbm's error=0.1765,\tbest lgbm's error=0.1765\n",
- "[flaml.automl: 08-22 21:09:31] {1358} INFO - iteration 21, current learner lgbm\n",
- "[flaml.automl: 08-22 21:09:32] {1515} INFO - at 14.9s,\tbest lgbm's error=0.1693,\tbest lgbm's error=0.1693\n",
- "[flaml.automl: 08-22 21:09:32] {1358} INFO - iteration 22, current learner lgbm\n",
- "[flaml.automl: 08-22 21:09:33] {1515} INFO - at 15.8s,\tbest lgbm's error=0.1693,\tbest lgbm's error=0.1693\n",
- "[flaml.automl: 08-22 21:09:33] {1358} INFO - iteration 23, current learner lgbm\n",
- "[flaml.automl: 08-22 21:09:34] {1515} INFO - at 16.9s,\tbest lgbm's error=0.1693,\tbest lgbm's error=0.1693\n",
- "[flaml.automl: 08-22 21:09:34] {1358} INFO - iteration 24, current learner lgbm\n",
- "[flaml.automl: 08-22 21:09:35] {1515} INFO - at 18.1s,\tbest lgbm's error=0.1693,\tbest lgbm's error=0.1693\n",
- "[flaml.automl: 08-22 21:09:35] {1358} INFO - iteration 25, current learner lgbm\n",
- "[flaml.automl: 08-22 21:09:38] {1515} INFO - at 20.9s,\tbest lgbm's error=0.1685,\tbest lgbm's error=0.1685\n",
- "[flaml.automl: 08-22 21:09:38] {1358} INFO - iteration 26, current learner lgbm\n",
- "[flaml.automl: 08-22 21:09:39] {1515} INFO - at 22.0s,\tbest lgbm's error=0.1685,\tbest lgbm's error=0.1685\n",
- "[flaml.automl: 08-22 21:09:39] {1358} INFO - iteration 27, current learner lgbm\n",
- "[flaml.automl: 08-22 21:09:47] {1515} INFO - at 30.0s,\tbest lgbm's error=0.1685,\tbest lgbm's error=0.1685\n",
- "[flaml.automl: 08-22 21:09:47] {1358} INFO - iteration 28, current learner lgbm\n",
- "[flaml.automl: 08-22 21:09:50] {1515} INFO - at 32.7s,\tbest lgbm's error=0.1685,\tbest lgbm's error=0.1685\n",
- "[flaml.automl: 08-22 21:09:50] {1358} INFO - iteration 29, current learner lgbm\n",
- "[flaml.automl: 08-22 21:09:51] {1515} INFO - at 33.6s,\tbest lgbm's error=0.1685,\tbest lgbm's error=0.1685\n",
- "[flaml.automl: 08-22 21:09:51] {1358} INFO - iteration 30, current learner lgbm\n",
- "[flaml.automl: 08-22 21:10:09] {1515} INFO - at 52.0s,\tbest lgbm's error=0.1685,\tbest lgbm's error=0.1685\n",
- "[flaml.automl: 08-22 21:10:09] {1358} INFO - iteration 31, current learner lgbm\n",
- "[flaml.automl: 08-22 21:10:11] {1515} INFO - at 54.1s,\tbest lgbm's error=0.1685,\tbest lgbm's error=0.1685\n",
- "[flaml.automl: 08-22 21:10:11] {1358} INFO - iteration 32, current learner lgbm\n",
- "[flaml.automl: 08-22 21:10:17] {1515} INFO - at 59.6s,\tbest lgbm's error=0.1609,\tbest lgbm's error=0.1609\n",
- "[flaml.automl: 08-22 21:10:17] {1358} INFO - iteration 33, current learner lgbm\n",
- "[flaml.automl: 08-22 21:10:22] {1515} INFO - at 65.1s,\tbest lgbm's error=0.1609,\tbest lgbm's error=0.1609\n",
- "[flaml.automl: 08-22 21:10:22] {1358} INFO - iteration 34, current learner lgbm\n",
- "[flaml.automl: 08-22 21:10:26] {1515} INFO - at 68.7s,\tbest lgbm's error=0.1609,\tbest lgbm's error=0.1609\n",
- "[flaml.automl: 08-22 21:10:26] {1358} INFO - iteration 35, current learner lgbm\n",
- "[flaml.automl: 08-22 21:10:45] {1515} INFO - at 88.0s,\tbest lgbm's error=0.1609,\tbest lgbm's error=0.1609\n",
- "[flaml.automl: 08-22 21:10:45] {1358} INFO - iteration 36, current learner lgbm\n",
- "[flaml.automl: 08-22 21:10:46] {1515} INFO - at 88.9s,\tbest lgbm's error=0.1609,\tbest lgbm's error=0.1609\n",
- "[flaml.automl: 08-22 21:10:46] {1358} INFO - iteration 37, current learner lgbm\n",
- "[flaml.automl: 08-22 21:10:54] {1515} INFO - at 96.6s,\tbest lgbm's error=0.1573,\tbest lgbm's error=0.1573\n",
- "[flaml.automl: 08-22 21:10:54] {1358} INFO - iteration 38, current learner lgbm\n",
- "[flaml.automl: 08-22 21:10:57] {1515} INFO - at 99.6s,\tbest lgbm's error=0.1573,\tbest lgbm's error=0.1573\n",
- "[flaml.automl: 08-22 21:10:57] {1358} INFO - iteration 39, current learner lgbm\n",
- "[flaml.automl: 08-22 21:11:57] {1515} INFO - at 160.1s,\tbest lgbm's error=0.1573,\tbest lgbm's error=0.1573\n",
- "[flaml.automl: 08-22 21:11:57] {1358} INFO - iteration 40, current learner lgbm\n",
- "[flaml.automl: 08-22 21:11:59] {1515} INFO - at 161.4s,\tbest lgbm's error=0.1573,\tbest lgbm's error=0.1573\n",
- "[flaml.automl: 08-22 21:11:59] {1358} INFO - iteration 41, current learner lgbm\n",
- "[flaml.automl: 08-22 21:12:00] {1515} INFO - at 162.5s,\tbest lgbm's error=0.1573,\tbest lgbm's error=0.1573\n",
- "[flaml.automl: 08-22 21:12:00] {1358} INFO - iteration 42, current learner lgbm\n",
- "[flaml.automl: 08-22 21:12:35] {1515} INFO - at 197.7s,\tbest lgbm's error=0.1535,\tbest lgbm's error=0.1535\n",
- "[flaml.automl: 08-22 21:12:35] {1358} INFO - iteration 43, current learner lgbm\n",
- "[flaml.automl: 08-22 21:13:09] {1515} INFO - at 231.6s,\tbest lgbm's error=0.1535,\tbest lgbm's error=0.1535\n",
- "[flaml.automl: 08-22 21:13:09] {1592} INFO - selected model: LGBMRegressor(colsample_bytree=0.6513228229604555,\n",
- " learning_rate=0.011556686284183076, max_bin=512,\n",
- " min_child_samples=9, n_estimators=2120, num_leaves=92,\n",
- " objective='regression', reg_alpha=0.024999216167840198,\n",
- " reg_lambda=0.01918323581702806, verbose=-1)\n",
- "[flaml.automl: 08-22 21:13:16] {1633} INFO - retrain lgbm for 6.6s\n",
- "[flaml.automl: 08-22 21:13:16] {1636} INFO - retrained model: LGBMRegressor(colsample_bytree=0.6513228229604555,\n",
- " learning_rate=0.011556686284183076, max_bin=512,\n",
- " min_child_samples=9, n_estimators=2120, num_leaves=92,\n",
- " objective='regression', reg_alpha=0.024999216167840198,\n",
- " reg_lambda=0.01918323581702806, verbose=-1)\n",
- "[flaml.automl: 08-22 21:13:16] {1199} INFO - fit succeeded\n",
- "[flaml.automl: 08-22 21:13:16] {1200} INFO - Time taken to find the best model: 197.68836307525635\n",
- "[flaml.automl: 08-22 21:13:16] {1205} WARNING - Time taken to find the best model is 82% of the provided time budget and not all estimators' hyperparameter search converged. Consider increasing the time budget.\n"
+ "[flaml.automl: 09-29 23:10:08] {1446} INFO - Data split method: uniform\n",
+ "[flaml.automl: 09-29 23:10:08] {1450} INFO - Evaluation method: cv\n",
+ "[flaml.automl: 09-29 23:10:08] {1496} INFO - Minimizing error metric: 1-r2\n",
+ "[flaml.automl: 09-29 23:10:08] {1533} INFO - List of ML learners in AutoML Run: ['lgbm']\n",
+ "[flaml.automl: 09-29 23:10:08] {1763} INFO - iteration 0, current learner lgbm\n",
+ "[flaml.automl: 09-29 23:10:08] {1880} INFO - Estimated sufficient time budget=3832s. Estimated necessary time budget=4s.\n",
+ "[flaml.automl: 09-29 23:10:08] {1952} INFO - at 0.4s,\testimator lgbm's best error=0.7383,\tbest estimator lgbm's best error=0.7383\n",
+ "[flaml.automl: 09-29 23:10:08] {1763} INFO - iteration 1, current learner lgbm\n",
+ "[flaml.automl: 09-29 23:10:08] {1952} INFO - at 0.6s,\testimator lgbm's best error=0.4774,\tbest estimator lgbm's best error=0.4774\n",
+ "[flaml.automl: 09-29 23:10:08] {1763} INFO - iteration 2, current learner lgbm\n",
+ "[flaml.automl: 09-29 23:10:09] {1952} INFO - at 0.8s,\testimator lgbm's best error=0.4774,\tbest estimator lgbm's best error=0.4774\n",
+ "[flaml.automl: 09-29 23:10:09] {1763} INFO - iteration 3, current learner lgbm\n",
+ "[flaml.automl: 09-29 23:10:09] {1952} INFO - at 0.9s,\testimator lgbm's best error=0.2985,\tbest estimator lgbm's best error=0.2985\n",
+ "[flaml.automl: 09-29 23:10:09] {1763} INFO - iteration 4, current learner lgbm\n",
+ "[flaml.automl: 09-29 23:10:09] {1952} INFO - at 1.3s,\testimator lgbm's best error=0.2337,\tbest estimator lgbm's best error=0.2337\n",
+ "[flaml.automl: 09-29 23:10:09] {1763} INFO - iteration 5, current learner lgbm\n",
+ "[flaml.automl: 09-29 23:10:09] {1952} INFO - at 1.5s,\testimator lgbm's best error=0.2337,\tbest estimator lgbm's best error=0.2337\n",
+ "[flaml.automl: 09-29 23:10:09] {1763} INFO - iteration 6, current learner lgbm\n",
+ "[flaml.automl: 09-29 23:10:10] {1952} INFO - at 2.4s,\testimator lgbm's best error=0.2219,\tbest estimator lgbm's best error=0.2219\n",
+ "[flaml.automl: 09-29 23:10:10] {1763} INFO - iteration 7, current learner lgbm\n",
+ "[flaml.automl: 09-29 23:10:11] {1952} INFO - at 2.9s,\testimator lgbm's best error=0.2219,\tbest estimator lgbm's best error=0.2219\n",
+ "[flaml.automl: 09-29 23:10:11] {1763} INFO - iteration 8, current learner lgbm\n",
+ "[flaml.automl: 09-29 23:10:12] {1952} INFO - at 4.2s,\testimator lgbm's best error=0.1764,\tbest estimator lgbm's best error=0.1764\n",
+ "[flaml.automl: 09-29 23:10:12] {1763} INFO - iteration 9, current learner lgbm\n",
+ "[flaml.automl: 09-29 23:10:16] {1952} INFO - at 8.6s,\testimator lgbm's best error=0.1630,\tbest estimator lgbm's best error=0.1630\n",
+ "[flaml.automl: 09-29 23:10:16] {1763} INFO - iteration 10, current learner lgbm\n",
+ "[flaml.automl: 09-29 23:10:18] {1952} INFO - at 10.2s,\testimator lgbm's best error=0.1630,\tbest estimator lgbm's best error=0.1630\n",
+ "[flaml.automl: 09-29 23:10:18] {1763} INFO - iteration 11, current learner lgbm\n",
+ "[flaml.automl: 09-29 23:10:32] {1952} INFO - at 24.4s,\testimator lgbm's best error=0.1630,\tbest estimator lgbm's best error=0.1630\n",
+ "[flaml.automl: 09-29 23:10:32] {1763} INFO - iteration 12, current learner lgbm\n",
+ "[flaml.automl: 09-29 23:10:34] {1952} INFO - at 26.0s,\testimator lgbm's best error=0.1630,\tbest estimator lgbm's best error=0.1630\n",
+ "[flaml.automl: 09-29 23:10:34] {1763} INFO - iteration 13, current learner lgbm\n",
+ "[flaml.automl: 09-29 23:10:43] {1952} INFO - at 35.5s,\testimator lgbm's best error=0.1564,\tbest estimator lgbm's best error=0.1564\n",
+ "[flaml.automl: 09-29 23:10:43] {1763} INFO - iteration 14, current learner lgbm\n",
+ "[flaml.automl: 09-29 23:10:49] {1952} INFO - at 40.7s,\testimator lgbm's best error=0.1564,\tbest estimator lgbm's best error=0.1564\n",
+ "[flaml.automl: 09-29 23:10:49] {1763} INFO - iteration 15, current learner lgbm\n",
+ "[flaml.automl: 09-29 23:10:52] {1952} INFO - at 43.8s,\testimator lgbm's best error=0.1564,\tbest estimator lgbm's best error=0.1564\n",
+ "[flaml.automl: 09-29 23:10:52] {1763} INFO - iteration 16, current learner lgbm\n",
+ "[flaml.automl: 09-29 23:11:08] {1952} INFO - at 59.9s,\testimator lgbm's best error=0.1564,\tbest estimator lgbm's best error=0.1564\n",
+ "[flaml.automl: 09-29 23:11:08] {1763} INFO - iteration 17, current learner lgbm\n",
+ "[flaml.automl: 09-29 23:11:20] {1952} INFO - at 72.6s,\testimator lgbm's best error=0.1564,\tbest estimator lgbm's best error=0.1564\n",
+ "[flaml.automl: 09-29 23:11:20] {1763} INFO - iteration 18, current learner lgbm\n",
+ "[flaml.automl: 09-29 23:11:24] {1952} INFO - at 75.9s,\testimator lgbm's best error=0.1564,\tbest estimator lgbm's best error=0.1564\n",
+ "[flaml.automl: 09-29 23:11:24] {1763} INFO - iteration 19, current learner lgbm\n",
+ "[flaml.automl: 09-29 23:11:27] {1952} INFO - at 79.3s,\testimator lgbm's best error=0.1564,\tbest estimator lgbm's best error=0.1564\n",
+ "[flaml.automl: 09-29 23:11:27] {1763} INFO - iteration 20, current learner lgbm\n",
+ "[flaml.automl: 09-29 23:11:51] {1952} INFO - at 102.8s,\testimator lgbm's best error=0.1564,\tbest estimator lgbm's best error=0.1564\n",
+ "[flaml.automl: 09-29 23:11:51] {1763} INFO - iteration 21, current learner lgbm\n",
+ "[flaml.automl: 09-29 23:11:53] {1952} INFO - at 105.5s,\testimator lgbm's best error=0.1564,\tbest estimator lgbm's best error=0.1564\n",
+ "[flaml.automl: 09-29 23:11:53] {1763} INFO - iteration 22, current learner lgbm\n",
+ "[flaml.automl: 09-29 23:12:38] {1952} INFO - at 149.8s,\testimator lgbm's best error=0.1564,\tbest estimator lgbm's best error=0.1564\n",
+ "[flaml.automl: 09-29 23:12:38] {1763} INFO - iteration 23, current learner lgbm\n",
+ "[flaml.automl: 09-29 23:12:38] {1952} INFO - at 150.6s,\testimator lgbm's best error=0.1564,\tbest estimator lgbm's best error=0.1564\n",
+ "[flaml.automl: 09-29 23:12:38] {1763} INFO - iteration 24, current learner lgbm\n",
+ "[flaml.automl: 09-29 23:13:19] {1952} INFO - at 191.0s,\testimator lgbm's best error=0.1564,\tbest estimator lgbm's best error=0.1564\n",
+ "[flaml.automl: 09-29 23:13:19] {1763} INFO - iteration 25, current learner lgbm\n",
+ "[flaml.automl: 09-29 23:14:08] {1952} INFO - at 240.6s,\testimator lgbm's best error=0.1564,\tbest estimator lgbm's best error=0.1564\n",
+ "[flaml.automl: 09-29 23:14:09] {2059} INFO - selected model: LGBMRegressor(colsample_bytree=0.8025848209352517,\n",
+ " learning_rate=0.09100963138990395, max_bin=255,\n",
+ " min_child_samples=42, n_estimators=363, num_leaves=216,\n",
+ " reg_alpha=0.001113000336715291, reg_lambda=76.50614276906414,\n",
+ " verbose=-1)\n",
+ "[flaml.automl: 09-29 23:14:10] {2122} INFO - retrain lgbm for 1.9s\n",
+ "[flaml.automl: 09-29 23:14:10] {2128} INFO - retrained model: LGBMRegressor(colsample_bytree=0.8025848209352517,\n",
+ " learning_rate=0.09100963138990395, max_bin=255,\n",
+ " min_child_samples=42, n_estimators=363, num_leaves=216,\n",
+ " reg_alpha=0.001113000336715291, reg_lambda=76.50614276906414,\n",
+ " verbose=-1)\n",
+ "[flaml.automl: 09-29 23:14:10] {1557} INFO - fit succeeded\n",
+ "[flaml.automl: 09-29 23:14:10] {1558} INFO - Time taken to find the best model: 35.49429440498352\n"
]
}
],
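The block above swaps the old 08-22 notebook run for a fresh 09-29 run under the updated log format: the per-iteration message now reads "estimator lgbm's best error" rather than "best lgbm's error", and the run opens with an estimated sufficient/necessary time budget. For reference, a minimal sketch of the kind of `fit` call that produces such a log; the `time_budget` of 240 is inferred from the run stopping near 240.6s, and the data variables and log filename are placeholders:

```python
from flaml import AutoML

automl = AutoML()
settings = {
    "time_budget": 240,           # seconds; inferred from the log ending near 240.6s
    "metric": "r2",               # logged as "Minimizing error metric: 1-r2"
    "estimator_list": ["lgbm"],   # matches "List of ML learners in AutoML Run: ['lgbm']"
    "task": "regression",
    "log_file_name": "lgbm.log",  # illustrative filename
}
# X_train and y_train are assumed to be defined in an earlier notebook cell
automl.fit(X_train=X_train, y_train=y_train, **settings)
```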
@@ -283,9 +247,9 @@
"output_type": "stream",
"name": "stdout",
"text": [
- "Best hyperparmeter config: {'n_estimators': 2120, 'num_leaves': 92, 'min_child_samples': 9, 'learning_rate': 0.011556686284183076, 'log_max_bin': 10, 'colsample_bytree': 0.6513228229604555, 'reg_alpha': 0.024999216167840198, 'reg_lambda': 0.01918323581702806}\n",
- "Best r2 on validation data: 0.8465\n",
- "Training duration of best run: 35.16 s\n"
+ "Best hyperparmeter config: {'n_estimators': 363, 'num_leaves': 216, 'min_child_samples': 42, 'learning_rate': 0.09100963138990395, 'log_max_bin': 8, 'colsample_bytree': 0.8025848209352517, 'reg_alpha': 0.001113000336715291, 'reg_lambda': 76.50614276906414}\n",
+ "Best r2 on validation data: 0.8436\n",
+ "Training duration of best run: 9.511 s\n"
]
}
],
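The new best configuration trades the old 2120-tree, low-learning-rate model for a 363-tree model with much stronger L2 regularization, at a nearly identical validation r2 (0.8436 vs 0.8465). The three printed values come from attributes of the fitted `AutoML` object; a sketch of the accessors, using the attribute names from flaml's API:

```python
# 1 - best_loss recovers r2 because the minimized metric is 1-r2
print("Best hyperparameter config:", automl.best_config)
print("Best r2 on validation data: {0:.4g}".format(1 - automl.best_loss))
print("Training duration of best run: {0:.4g} s".format(automl.best_config_train_time))
```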
@@ -300,18 +264,18 @@
"cell_type": "code",
"execution_count": 6,
"source": [
- "automl.model.estimator\n"
+ "automl.model.estimator"
],
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
- "LGBMRegressor(colsample_bytree=0.6513228229604555,\n",
- " learning_rate=0.011556686284183076, max_bin=512,\n",
- " min_child_samples=9, n_estimators=2120, num_leaves=92,\n",
- " objective='regression', reg_alpha=0.024999216167840198,\n",
- " reg_lambda=0.01918323581702806, verbose=-1)"
+ "LGBMRegressor(colsample_bytree=0.8025848209352517,\n",
+ " learning_rate=0.09100963138990395, max_bin=255,\n",
+ " min_child_samples=42, n_estimators=363, num_leaves=216,\n",
+ " reg_alpha=0.001113000336715291, reg_lambda=76.50614276906414,\n",
+ " verbose=-1)"
]
},
"metadata": {},
@@ -345,11 +309,10 @@
{
"output_type": "display_data",
"data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAdUAAAD4CAYAAAC6/HyrAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjAsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8GearUAAAfTklEQVR4nO3de3hdVZ3/8feHtLTcTIEiTyxIADtcW0IbkPvgDRX9IUi1CgMF52eHy6Diw2gVn7HgOAJlRkRRqDNIueqvXISnCJUfWOgPKSWhbdICBaRVqQiCEi4VhPb7+2Ov0E0ml3OSnZyTk8/rec6Tvddee63vOrvNN2vtnRNFBGZmZjZwm1U6ADMzs1rhpGpmZlYQJ1UzM7OCOKmamZkVxEnVzMysIKMqHYANrvHjx0djY2OlwzAzG1ZaW1ufj4gdyj3PSbXGNTY20tLSUukwzMyGFUm/7c95Xv41MzMriJOqmZlZQZxUzczMCuKkamZmVhAnVTMzs4I4qZqZmRXESdXMzKwgTqpmZmYF8Yc/1Lj2dR00zrq90mGYmfVo7QUfq3QIhfFM1czMrCBOqmZmZgVxUjUzMyuIk6qZmVlBnFTNzMwK4qRqZmZWECfVHEmvDEKbx0ialbaPlbR3P9pYJKm56NjMzKxYTqqDLCJui4gL0u6xQNlJ1czMhgcn1W4oM0fSSkntkqan8iPTrPFGSY9Juk6S0rGjU1mrpEslLUjlp0j6gaRDgGOAOZKWS9o9PwOVNF7S2rS9haSfSnpU0i3AFrnYjpL0gKSHJc2XtPXQvjtmZtYTf6JS9z4JNAH7AeOBhyTdl47tD+wD/AG4HzhUUgtwBXBERKyRdEPXBiPi15JuAxZExI0AKR9353RgfUTsJWky8HCqPx74BvDBiHhV0leBLwPn50+WNBOYCVD3jh36+RaYmVm5PFPt3mHADRGxISKeBe4FDkjHlkbE0xGxEVgONAJ7Ak9FxJpU538k1TIdAVwLEBFtQFsqP4hs+fh+ScuBGcAuXU+OiLkR0RwRzXVb1g8wFDMzK5VnquV7Pbe9gYG9h2+y6QebsSXUF3BXRHx2AH2amdkg8Uy1e4uB6ZLqJO1ANnNc2kv91cBukhrT/vQe6r0MbJPbXwtMTdvTcuX3AScASNoXmJzKl5AtN78nHdtK0t+VMB4zMxsCTqrdu4VsyXUFcA/wlYj4Y0+VI+KvwBnAnZJayZJnRzdVfwr8i6RlknYHLgZOl7SM7N5tpx8BW0t6lOx+aWvq50/AKcANktqAB8iWns3MrAooIiodQ02QtHVEvJKeBr4MeCIivlvpuMY0TIyGGZdUOgwzsx5V459+k9QaEWV/PoBnqsX5fHp4aBVQT/Y0sJmZjSB+UKkgaVZa8ZmpmZlVjmeqZmZmBXFSNTMzK4iTqpmZWUF8T7XGTZpQT0sVPllnZlaLPFM1MzMriJOqmZlZQZxUzczMCuKkamZmVhA/qFTj2td10Djr9kqHYWY2pCr10YeeqZqZmRXESdXMzKwgTqpmZmYFcVI1MzMriJOqmZlZQZxUzczMCuKkWgZJr/RxfJykM3L775J0Y9puknR0P/qcLemc8qM1M7Oh5qRarHHAW0k1Iv4QEdPSbhNQdlI1M7Phw0m1HyRtLeluSQ9Lapf0iXToAmB3ScslzZHUKGmlpM2B84Hp6dj0rjPQVK8xbZ8r6XFJ/w/YI1dnd0l3SmqVtFjSnkM2aDMz65M/Ual/XgOOi4iXJI0Hlki6DZgF7BsRTQCdSTIi/ibpX4HmiPjndGx2dw1Lmgp8hmxmOwp4GGhNh+cCp0XEE5LeC/wQeH83bcwEZgLUvWOHIsZrZmYlcFLtHwH/LukIYCMwAdixoLYPB26JiPUAKVkjaWvgEGC+pM66Y7prICLmkiVgxjRMjILiMjOzPjip9s+JwA7A1Ih4Q9JaYGyZbbzJ25ff+zp/M+DFzlmwmZlVH99T7Z964LmUUN8H7JLKXwa26eGcrsfWAlMAJE0Bdk3l9wHHStpC0jbA/wKIiJeANZI+lc6RpP2KG5KZmQ2Uk2r/XAc0S2oHTgYeA4iIF4D700NHc7qc8ytg784HlYCbgO0krQL+GXg8tfEw8DNgBXAH8FCujROBf5S0AlgFfAIzM6saivAtt1o2pmFiNMy4pNJhmJkNqYH+6TdJrRHRXO55nqmamZkVxEnVzMysIE6qZmZmBXFSNTMzK4h/T7XGTZpQT8sAb9ibmVlpPFM1MzMriJOqmZlZQZxUzczMCuKkamZmVhA/qFTj2td10Djr9kqHYVa1BvrJO2Z5nqmamZkVxEnVzMysIE6qZmZmBXFSNTMzK4iTqpmZWUGcVM3MzAoyIpKqpEZJKyvQ7ytl1p8t6ZxuyisSv5mZlWdEJFUzM7OhMJKSap2kH0taJemXkraQ1CRpiaQ2SbdI2hZA0iJJzWl7vKS1aXsfSUslLU/nTEzl/5Arv0JSXWenkr4taUXqZ8dU1ijpntTG3ZLe3TVYSVPTeSuAM3Pl3cZgZmaVN5KS6kTgsojYB3gROB64GvhqREwG2oFv9tHGacD3IqIJaAaelrQXMB04NJVvAE5M9bcClkTEfsB9wOdT+feBeanf64BLu+nrJ8BZ6dxeY+h6oqSZkloktWxY39HHkMzMrCgjKamuiYjlabsV2B0YFxH3prJ5wBF9tPEA8HVJXwV2iYi/Ah8ApgIPSVqe9ndL9f8GLMj12Zi2DwauT9vXAIflO5E0LsV2X65ObzG8TUTMjYjmiGiu27K+jyGZmVlRRlJSfT23vQEY10vdN9n03oztLIyI64FjgL8Cv5D0fkBks86m9NojImanU96IiMj1OeDPWu4hBjMzqwIjKal21QH8RdLhaf8koHPWupZs9gkwrfMESbsBT0XEpcCtwGTgbmCapHemOttJ2qWPvn8NfCZtnwgszh+MiBeBFyUdlqvTWwxmZlYFRnJSBZgBzJHUBjQB56fyi4HTJS0DxufqfxpYmZZ59wWujohHgG8Av0zt3AU09NHvWcCpqf5JwBe7qXMqcFnqS73FUPJozcxsUGnT6qTVojENE6NhxiWVDsOsavlPv1l3JLVGRHO55430maqZmVlhnFTNzMwK4qRqZmZWECdVMzOzggz49yatuk2aUE+LH8QwMxsSnqmamZkVxEnVzMysIE6qZmZmBXFSNTMzK4gfVKpx7es6aJx1e6XDMLNhyp84VR7PVM3MzAripGpmZlYQJ1UzM7OCOKmamZkVxEnVzMysIE6qZmZmBXFSHQSSGiWtLKHOCbn9ZkmXDn50ZmY2WJxUK6cReCupRkRLRHyhcuGYmdlAjcikmmaJj0m6TtKjkm6UtKWkD0haJqld0pWSxqT6ayVdlMqXSnpPKr9K0rRcu6/00NdiSQ+n1yHp0AXA4ZKWSzpb0pGSFqRztpP0c0ltkpZImpzKZ6e4Fkl6SpKTsJlZFRmRSTXZA/hhROwFvAR8GbgKmB4Rk8g+ber0XP2OVP4D4JIy+nkO+FBETAGmA51LvLOAxRH
RFBHf7XLOecCyiJgMfB24OndsT+DDwIHANyWN7tqhpJmSWiS1bFjfUUaoZmY2ECM5qf4+Iu5P29cCHwDWRMTjqWwecESu/g25rweX0c9o4MeS2oH5wN4lnHMYcA1ARNwDbC/pHenY7RHxekQ8T5awd+x6ckTMjYjmiGiu27K+jFDNzGwgRvJn/0aX/ReB7Uus37n9JukHE0mbAZt3c97ZwLPAfqnua/0JNuf13PYGRvY1NDOrKiN5pvpuSZ0zzhOAFqCx834pcBJwb67+9NzXB9L2WmBq2j6GbFbaVT3wTERsTG3WpfKXgW16iG0xcCKApCOB5yPipZJGZWZmFTOSZzmrgTMlXQk8AnwBWALMlzQKeAi4PFd/W0ltZDPFz6ayHwO3SloB3Am82k0/PwRuknRylzptwIZ07lXAstw5s4ErU3/rgRkDG6qZmQ0FRXRdBa19khqBBRGxb4n11wLN6T7msDKmYWI0zCjnuSozs01G6p9+k9QaEc3lnjeSl3/NzMwKNSKXfyNiLVDSLDXVbxy0YMzMrGZ4pmpmZlYQJ1UzM7OCOKmamZkVZETeUx1JJk2op2WEPr1nZjbUPFM1MzMriJOqmZlZQZxUzczMCuKkamZmVhA/qFTj2td10Djr9kqHYVb1RurH8VmxPFM1MzMriJOqmZlZQZxUzczMCuKkamZmVhAnVTMzs4I4qZqZmRWk6pKqpHGSzuijTqOkE0poq1HSyl6OnyLpB/2Js4jzzcystlRdUgXGAb0mVaAR6DOpVook//6vmdkIVI1J9QJgd0nLJc1Jr5WS2iVNz9U5PNU5O81IF0t6OL0OKaO/nSUtkvSEpG92Fkr6B0lLUx9XSKpL5adKelzSUuDQXP2rJF0u6UHgIklNkpZIapN0i6RtU72eyhdJ+q6kFkmPSjpA0s0prn9LdbaSdLukFek9mY6ZmVWNakyqs4DfREQTsARoAvYDPgjMkdSQ6iyOiKaI+C7wHPChiJgCTAcuLaO/A4HjgcnApyQ1S9ortXNoimMDcGLq+zyyZHoYsHeXtnYCDomILwNXA1+NiMlAO9CZsHsqB/hbRDQDlwO3AmcC+wKnSNoe+Ajwh4jYLyL2Be7sbkCSZqbk3LJhfUcZb4WZmQ1EtS9THgbcEBEbgGcl3QscALzUpd5o4AeSOhPg35XRx10R8QKApJtTn28CU4GHJAFsQZa43wssiog/pfo/69LX/IjYIKkeGBcR96byecD8nspz59+WvrYDqyLimdTPU8DOqfw/JF0ILIiIxd0NKCLmAnMBxjRMjDLeCzMzG4BqT6qlOht4lmxGuxnwWhnndk06AQiYFxFfyx+QdGwfbb1aRr/deT193Zjb7twfFRGPS5oCHA38m6S7I+L8AfZpZmYFqcbl35eBbdL2YmC6pDpJOwBHAEu71AGoB56JiI3ASUBdGf19SNJ2krYAjgXuB+4Gpkl6J0A6vgvwIPD3kraXNBr4VHcNRkQH8BdJh6eik4B7eyovNVBJ7wLWR8S1wBxgShnjNDOzQVZ1M9WIeEHS/elXYe4A2oAVZDPIr0TEHyW9AGyQtAK4CvghcJOkk8nuM5YzY1wK3ER2P/TaiGgBkPQN4JeSNgPeAM6MiCWSZgMPAC8Cy3tpdwZwuaQtgaeAU/soL8UksvvKG1NMp5dxrpmZDTJF+JZbLRvTMDEaZlxS6TDMqp7/9JvlSWpND46WpRqXf83MzIalqlv+HQySPgxc2KV4TUQcV4l4zMysNo2IpBoRC4GFlY7DzMxqm5d/zczMCjIiZqoj2aQJ9bT4AQwzsyHhmaqZmVlBnFTNzMwK4qRqZmZWECdVMzOzgvhBpRrXvq6Dxlm3VzoMM0v8yU21zTNVMzOzgjipmpmZFcRJ1czMrCBOqmZmZgVxUjUzMyuIk6qZmVlBnFTNzMwKUtNJVdI4SWf0UadR0gkltNUoaWVx0ZmZWa2p6aQKjAN6TapAI9BnUi2HJH+ohpnZCFTrSfUCYHdJyyXNSa+VktolTc/VOTzVOTvNSBdLeji9DimlI0mnSLpN0j3A3ZK2k/RzSW2SlkianOr1VD5b0rzU928lfVLSRSnWOyWNTvUukPRIOv/iHmKZKalFUsuG9R0DfQ/NzKxEtT6jmgXsGxFNko4HTgP2A8YDD0m6L9U5JyI+DiBpS+BDEfGapInADUBzif1NASZHxJ8lfR9YFhHHSno/cDXQBJzXQznA7sD7gL2BB4DjI+Irkm4BPiZpMXAcsGdEhKRx3QUREXOBuQBjGiZGqW+WmZkNTK3PVPMOA26IiA0R8SxwL3BAN/VGAz+W1A7MJ0twpborIv6c6+8agIi4B9he0jt6KQe4IyLeANqBOuDOVN5OtkzdAbwG/LekTwLry4jNzMwG2UhKqqU6G3iWbEbbDGxexrmvDrDv1wEiYiPwRkR0zjI3AqMi4k3gQOBG4ONsSrpmZlYFaj2pvgxsk7YXA9Ml1UnaATgCWNqlDkA98ExKbCeRzRj7YzFwIoCkI4HnI+KlXsr7JGlroD4ifkGW/PfrZ2xmZjYIavqeakS8IOn+9KswdwBtwAoggK9ExB8lvQBskLQCuAr4IXCTpJPJZoL9nX3OBq6U1Ea2TDujj/JSbAPcKmksIODL/YzNzMwGgTatMFotGtMwMRpmXFLpMMws8d9THR4ktUZEqQ+pvqXWl3/NzMyGTE0v/w4GSR8GLuxSvCYijqtEPGZmVj2cVMsUEQuBhZWOw8zMqo+Tao2bNKGeFt/DMTMbEr6namZmVhAnVTMzs4I4qZqZmRXESdXMzKwgflCpxrWv66Bx1u2VDsOsZvjDG6w3nqmamZkVxEnVzMysIE6qZmZmBXFSNTMzK4iTqpmZWUGcVM3MzAripGpmZlaQPpOqpEZJKwcrAEm/Hqy2Byo/dknNki6tdExmZla9Kv7hDxFxSKVjKEVEtAAtlY7DzMyqV6nLv3WSfixplaRfStpCUpOkJZLaJN0iaVsASYskNaft8ZLWpu19JC2VtDydMzGVv5K+HpnOvVHSY5Kuk6R07OhU1irpUkkLegpU0mxJ8yQtlvRbSZ+UdJGkdkl3Shqd6k2VdG9qc6Gkhlz5CkkrgDNz7R7Z2a+kAyU9IGmZpF9L2iOVnyLp5tTPE5Iu6u1NlfQjSS3pfT0vV97teCVtJenK9D4uk/SJHtqdmdpt2bC+o7cQzMysQKUm1YnAZRGxD/AicDxwNfDViJgMtAPf7KON04DvRUQT0Aw83U2d/YEvAXsDuwGHShoLXAF8NCKmAjuUEO/uwPuBY4BrgV9FxCTgr8DHUmL9PjAttXkl8O107k+AsyJiv17afww4PCL2B/4V+PfcsSZgOjAJmC5p517aOTcimoHJwN9LmtzHeM8F7omIA4H3AXMkbdW10YiYGxHNEdFct2V9L92bmVmRSl3+XRMRy9N2K1nSGhcR96ayecD8Ptp4ADhX0k7AzRHxRDd1lkbE0wCSlgONwCvAUxGxJtW5AZjZR193RMQbktqBOuDOVN6e2twD2Be4K02G64BnJI1L47ov1b8G+Gg37dcD89
JsO4DRuWN3R0RHGsMjwC7A73uI89OSZpJdhwayHyY262W8RwHHSDon7Y8F3g082vvbYWZmQ6HUpPp6bnsDMK6Xum+yaQY8trMwIq6X9CDwMeAXkv4pIu7po5/+3vN9PfW5UdIbERGpfGNqU8CqiDg4f1JKqqX4Ftns9zhJjcCirn0nPY5B0q7AOcABEfEXSVeRe796IOD4iFhdYpxmZjaE+vsrNR3AXyQdnvZPAjpnrWuBqWl7WucJknYjm4FdCtxKtuRZitXAbil5Qba0OlCrgR0kHZxiGy1pn4h4EXhR0mGp3ok9nF8PrEvbp/QzhncArwIdknZk04y4t/EuBM7K3Wvev599m5nZIBjI76nOILun10Z2H/H8VH4xcLqkZcD4XP1PAyvTsu6+ZPdk+xQRfwXOAO6U1Aq8TJbU+y0i/kaW8C9MDyQtBzqfQj4VuCzFqR6auAj4Thpjv2bTEbECWEZ2f/Z64P5U3tt4v0W21NwmaVXaNzOzKqFNK6PVS9LWEfFKmqFdBjwREd+tdFyDpcjxjmmYGA0zLik2QLMRzH9PdWSQ1JoeJC3LcPlEpc+nmeMqsqXXKyocz2AbaeM1M6sJFf/wh1KkWdrbZmqSTgW+2KXq/RFxJlUmPaA1pkvxSRHR3l397sZrZmbVb1gk1e5ExE/Ifqe06kXEeysdg5mZDb7hsvxrZmZW9YbtTNVKM2lCPS1+sMLMbEh4pmpmZlYQJ1UzM7OCOKmamZkVxEnVzMysIH5Qqca1r+ugcdbtlQ7DzKqYPyWqOJ6pmpmZFcRJ1czMrCBOqmZmZgVxUjUzMyuIk6qZmVlBnFTNzMwK4qRqZmZWkJpNqpIWSWpO27+QNK7Atk+TdHJR7ZmZWW0YER/+EBFHF9ze5UW2Z2ZmtaGqZqqSGiU9JukqSY9Luk7SByXdL+kJSQdK2krSlZKWSlom6RPp3C0k/VTSo5JuAbbItbtW0vi0/XNJrZJWSZqZq/OKpG9LWiFpiaQde4lztqRz0vYiSRemeB6XdHgqr5N0saSVktoknZXKP5Dibk/jGJOL8TuSlktqkTRF0kJJv5F0Wq7vf5H0UGrzvB7im5naaNmwvmMAV8TMzMpRVUk1eQ/wH8Ce6XUCcBhwDvB14Fzgnog4EHgfMEfSVsDpwPqI2Av4JjC1h/Y/FxFTgWbgC5K2T+VbAUsiYj/gPuDzZcQ8KsXzpdQ3wEygEWiKiMnAdZLGAlcB0yNiEtlKwem5dn4XEU3A4lRvGnAQcB6ApKOAicCBQBMwVdIRXYOJiLkR0RwRzXVb1pcxDDMzG4hqTKprIqI9IjYCq4C7IyKAdrIkdRQwS9JyYBEwFng3cARwLUBEtAFtPbT/BUkrgCXAzmRJCuBvwIK03Zr6KtXN3Zz3QeCKiHgzxfRnYI80vsdTnXkp7k63pa/twIMR8XJE/Al4Pd0TPiq9lgEPk/3QMREzM6sK1XhP9fXc9sbc/kayeDcAx0fE6vxJkvpsWNKRZMnu4IhYL2kRWVIGeCMlb1If5bw3nTGWe15P7eTH3bk/ChDwnYi4YgB9mJnZIKnGmWpfFgJnKWVRSfun8vvIloqRtC8wuZtz64G/pIS6J9nS6mC5C/gnSaNSTNsBq4FGSe9JdU4C7i2jzYXA5yRtndqcIOmdBcZsZmYDMByT6reA0UCbpFVpH+BHwNaSHgXOJ1uK7epOYFSqcwHZEvBg+S/gdynOFcAJEfEacCowX1I72Qy05CeJI+KXwPXAA+n8G4FtCo/czMz6RZtWPK0WjWmYGA0zLql0GGZWxfz3VP8nSa0R0VzuecNxpmpmZlaVqvFBpaoh6VzgU12K50fEtysRj5mZVTcn1V6k5OkEamZmJXFSrXGTJtTT4vslZmZDwvdUzczMCuKkamZmVhAnVTMzs4I4qZqZmRXESdXMzKwgTqpmZmYFcVI1MzMriJOqmZlZQZxUzczMCuK/UlPjJL1M9ndca8144PlKBzFIanVstTouqN2x1eq4oO+x7RIRO5TbqD+msPat7s+fL6p2klpqcVxQu2Or1XFB7Y6tVscFgzc2L/+amZkVxEnVzMysIE6qtW9upQMYJLU6LqjdsdXquKB2x1ar44JBGpsfVDIzMyuIZ6pmZmYFcVI1MzMriJNqjZL0EUmrJT0paVal4ymFpJ0l/UrSI5JWSfpiKt9O0l2Snkhft03lknRpGmObpCm5tmak+k9ImlGpMeVJqpO0TNKCtL+rpAdT/D+TtHkqH5P2n0zHG3NtfC2Vr5b04cqM5O0kjZN0o6THJD0q6eBauGaSzk7/DldKukHS2OF6zSRdKek5SStzZYVdI0lTJbWncy6VpAqOa076t9gm6RZJ43LHur0WPX2/7Ol69yoi/KqxF1AH/AbYDdgcWAHsXem4Soi7AZiStrcBHgf2Bi4CZqXyWcCFafto4A5AwEHAg6l8O+Cp9HXbtL1tFYzvy8D1wIK0/3+Az6Tty4HT0/YZwOVp+zPAz9L23ulajgF2Tde4rgrGNQ/432l7c2DccL9mwARgDbBF7lqdMlyvGXAEMAVYmSsr7BoBS1NdpXM/WsFxHQWMStsX5sbV7bWgl++XPV3vXmOq1D9avwb1H9rBwMLc/teAr1U6rn6M41bgQ2SfCNWQyhrIPtAC4Args7n6q9PxzwJX5MrfVq9CY9kJuBt4P7AgffN5Pvef/61rBiwEDk7bo1I9db2O+XoVHFc9WfJRl/Jhfc3IkurvUwIZla7Zh4fzNQMauySfQq5ROvZYrvxt9YZ6XF2OHQdcl7a7vRb08P2yt/+jvb28/FubOr8hdHo6lQ0baflsf+BBYMeIeCYd+iOwY9ruaZzVOP5LgK8AG9P+9sCLEfFm2s/H+Fb86XhHql+N49oV+BPwk7S0/V+StmKYX7OIWAdcDPwOeIbsGrRSG9esU1HXaELa7lpeDT5HNnOG8sfV2//RHjmpWtWRtDVwE/CliHgpfyyyHxmH1e+BSfo48FxEtFY6lkEwimz57UcRsT/wKtlS4luG6TXbFvgE2Q8N7wK2Aj5S0aAG0XC8Rn2RdC7wJnDdUPbrpFqb1gE75/Z3SmVVT9JosoR6XUTcnIqfldSQjjcAz6XynsZZbeM/FDhG0lrgp2RLwN8Dxknq/PztfIxvxZ+O1wMvUH3jguyn96cj4sG0fyNZkh3u1+yDwJqI+FNEvAHcTHYda+GadSrqGq1L213LK0bSKcDHgRPTDwxQ/rheoOfr3SMn1dr0EDAxPbm2OdmDE7dVOKY+pScG/xt4NCL+M3foNqDzScMZZPdaO8tPTk8rHgR0pOWshcBRkrZNM46jUllFRMTXImKniGgkuxb3RMSJwK+Aaala13F1jndaqh+p/DPpSdNdgYlkD4hUTET8Efi9pD1S0QeARxjm14xs2fcgSVumf5ed4xr21yynkGuUjr0k6aD0Xp2ca2vISfoI2a2WYyJife5QT9ei2++X6fr1dL17Vokb5n4N/ovsCb7HyZ5qO7fS8ZQY82FkS1BtwPL0Oprs3sbdwBPA/wW2S/UFXJbG2A4059r6HPBkep1a6
bHl4jqSTU//7pb+Uz8JzAfGpPKxaf/JdHy33PnnpvGuZoiesCxhTE1AS7puPyd7MnTYXzPgPOAxYCVwDdlTo8PymgE3kN0bfoNsdeEfi7xGQHN6n34D/IAuD64N8bieJLtH2vk95PK+rgU9fL/s6Xr39vLHFJqZmRXEy79mZmYFcVI1MzMriJOqmZlZQZxUzczMCuKkamZmVhAnVTMzs4I4qZqZmRXk/wOt+9wyU2lLXwAAAABJRU5ErkJggg==",
"text/plain": [
""
- ],
- "image/svg+xml": "\n\n\n\n",
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAc0AAAD4CAYAAACOhb23AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjAsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8GearUAAAe0ElEQVR4nO3de5RcVZn38e+PJiRAoAMEWT0RacEMCCQ0SYuCIYM3VJyXi0TjwEDAWWa4iIqL0Yy4xoDjCERHBkUhvIMEQfQNF2GB3F4wkBcJoZsk3YkQQBJHI4IiacBIgOR5/zi7pSj7sqtv1V39+6xVq3fts8/ez67T6Sf7nFNVigjMzMysd9tUOwAzM7ORwknTzMwsk5OmmZlZJidNMzOzTE6aZmZmmbatdgA2uCZOnBiNjY3VDsPMbERpbW39Q0TsXl7vpFnjGhsbaWlpqXYYZmYjiqRfdVXv07NmZmaZnDTNzMwyOWmamZllctI0MzPL5KRpZmaWyUnTzMwsk5OmmZlZJidNMzOzTP5wgxrXvqGDxnm3VTsMM7Mhtf6CjwxKv15pmpmZZXLSNDMzy+SkaWZmlslJ08zMLJOTppmZWSYnTTMzs0xOmiUkvTQIfR4taV4qHytp/z70sURS80DHZmZmlXHSHGQRcUtEXJCeHgtUnDTNzGx4cNLsggoLJK2W1C5pdqo/Iq36rpf0mKRrJSltOyrVtUq6RNKtqf4USd+RdBhwNLBA0kpJ+5SuICVNlLQ+lbeX9CNJj0q6Cdi+JLYjJT0o6RFJiyWNH9pXx8xs9PInAnXto0ATcBAwEXhY0v1p28HAAcBvgQeAd0tqAS4HZkbEOknXlXcYET+XdAtwa0RcD5DybVdOBzZFxNslTQUeSe0nAl8G3h8Rf5L0ReDzwPmlO0uaC8wFqNt59z6+BGZmVs4rza7NAK6LiC0R8QxwH/COtG15RPwmIrYCK4FGYD/gqYhYl9r8VdKs0EzgGoCIaAPaUv27KE7vPiBpJTAH2Kt854hYGBHNEdFct0N9P0MxM7NOXmlWbnNJeQv9ew1f4/X/uIzLaC/g7oj4h36MaWZmfeSVZteWArMl1UnanWLlt7yH9muBvSU1puezu2n3IrBTyfP1wPRUnlVSfz9wAoCkA4GpqX4Zxengt6VtO0r624z5mJnZAHDS7NpNFKdEVwH3Al+IiN911zgi/gycAdwhqZUiOXZ00fRHwL9IWiFpH+AbwOmSVlBcO+30PWC8pEcprle2pnF+D5wCXCepDXiQ4tSwmZkNAUVEtWOoCZLGR8RL6W7aS4EnIuJb1Y5rbMPkaJhzcbXDMDMbUv39ajBJrRHxV++P90pz4Hwq3ZyzBqinuJvWzMxqiG8EGiBpVVn1laWZmQ0erzTNzMwyOWmamZllctI0MzPL5GuaNW7KpHpa+nkXmZmZFbzSNDMzy+SkaWZmlslJ08zMLJOTppmZWSbfCFTj2jd00DjvtmqHYWY2KPr7cXmV8krTzMwsk5OmmZlZJidNMzOzTE6aZmZmmZw0zczMMjlpmpmZZXLSrICkl3rZPkHSGSXP/0bS9ancJOmoPow5X9I5lUdrZmYDzUlzYE0A/pI0I+K3ETErPW0CKk6aZmY2fDhp9oGk8ZLukfSIpHZJx6RNFwD7SFopaYGkRkmrJW0HnA/MTttml68gU7vGVD5X0uOS/h+wb0mbfSTdIalV0lJJ+w3ZpM3MzJ8I1EcvA8dFxAuSJgLLJN0CzAMOjIgmgM4kGBGvSPo3oDkiPp22ze+qY0nTgU9QrEy3BR4BWtPmhcBpEfGEpHcC3wXe20Ufc4G5AHU77z4Q8zUzM5w0+0rAf0iaCWwFJgF7DFDfhwM3RcQmgJSMkTQeOAxYLKmz7diuOoiIhRQJlrENk2OA4jIzG/WcNPvmRGB3YHpEvCppPTCuwj5e442nx3vbfxtgY+cq1szMhp6vafZNPfBsSpjvAfZK9S8CO3WzT/m29cA0AEnTgLem+vuBYyVtL2kn4H8BRMQLwDpJH0v7SNJBAzclMzPrjZNm31wLNEtqB04GHgOIiOeAB9JNPQvK9vkZsH/njUDADcCuktYAnwYeT308AvwYWAXcDjxc0seJwD9JWgWsAY7BzMyGjCJ8yauWjW2YHA1zLq52GGZmg2KwvhpMUmtENJfXe6VpZmaWyUnTzMwsk5OmmZlZJidNMzOzTH6fZo2bMqmelkG6UG5mNtp4pWlmZpbJSdPMzCyTk6aZmVkmJ00zM7NMvhGoxrVv6KBx3m3VDsNsVBmsT6mx6vNK08zMLJOTppmZWSYnTTMzs0xOmmZmZpmcNM3MzDI5aZqZmWUaFUlTUqOk1VUY96UK28+XdE4X9VWJ38zM3mhUJE0zM7OBMJqSZp2kKyStkXSXpO0lNUlaJqlN0k2SdgGQtERScypPlLQ+lQ+QtFzSyrTP5FT/jyX1l0uq6xxU0tckrUrj7JHqGiXdm/q4R9JbyoOVND3ttwo4s6S+yxjMzGzwjaakORm4NCIOADYCxwNXA1+MiKlAO/CVXvo4DfiviGgCmoHfSHo7MBt4d6rfApyY2u8ILIuIg4D7gU+l+m8Di9K41wKXdDHW94Gz0r49xlC+o6S5kloktWzZ1NHLlMzMLNdoSprrImJlKrcC+wATIuK+VLcImNlLHw8CX5L0RWCviPgz8D5gOvCwpJXp+d6p/SvArSVjNqbyocAPU/kHwIzSQSRNSLHdX9KmpxjeICIWRkRzRDTX7VDfy5TMzCzXaEqam0vKW4AJPbR9jddfm3GdlRHxQ+Bo4M/ATyW9FxDFqrEpPfaNiPlpl1cjIkrG7Pdn/XYTg5mZDYHRlDTLdQDPSzo8PT8J6Fx1rqdYPQLM6txB0t7AUxFxCXAzMBW4B5gl6U2pza6S9upl7J8Dn0jlE4GlpRsjYiOwUdKMkjY9xWBmZkNgNCdNgDnAAkltQBNwfqr/BnC6pBXAxJL2HwdWp9OwBwJXR8QvgC8Dd6V+7gYaehn3LODU1P4k4LNdtDkVuDSNpZ5iyJ6tmZn1i14/e2i1aGzD5GiYc3G1wzAbVfzVYCOfpNaIaC6vH+0rTTMzs2xOmmZmZpmcNM3MzDI5aZqZmWXq9/sGbXibMqmeFt+UYGY2ILzSNDMzy+SkaWZmlslJ08zMLJOTppmZWSbfCFTj2jd00DjvtmqHYWbDgD+pqP+80jQzM8vkpGlmZpbJSdPMzCyTk6aZmVkmJ00zM7NMTppmZmaZnDQHgaRGSasz2pxQ8rxZ0iWDH52ZmfWVk2b1NAJ/SZoR0RIRn6leOGZm1ptRmTTTKu8xSddKelTS9ZJ2kPQ+SSsktUu6UtLY1H69pItS/XJJb0v1V0maVdLvS92MtVTSI+lxWNp0AXC4pJWSzpZ0hKRb0z67SvqJpDZJyyRNTfXzU1xLJD0lyUnWzGwIjcqkmewLfDci3g68AHweuAqYHRFTKD4t6fSS9h2p/jvAxRWM8yzwgYiYBswGOk/BzgOWRkRTRHyrbJ/zgBURMRX4EnB
1ybb9gA8ChwBfkTSmfEBJcyW1SGrZsqmjglDNzKwnozlp/joiHkjla4D3Aesi4vFUtwiYWdL+upKfh1YwzhjgCkntwGJg/4x9ZgA/AIiIe4HdJO2ctt0WEZsj4g8UCXmP8p0jYmFENEdEc90O9RWEamZmPRnNnz0bZc83Artltu8sv0b6j4ekbYDtutjvbOAZ4KDU9uW+BFtic0l5C6P7GJqZDanRvNJ8i6TOFeMJQAvQ2Hm9EjgJuK+k/eySnw+m8npgeiofTbGqLFcPPB0RW1Ofdan+RWCnbmJbCpwIIOkI4A8R8ULWrMzMbNCM5lXKWuBMSVcCvwA+AywDFkvaFngYuKyk/S6S2ihWev+Q6q4Abpa0CrgD+FMX43wXuEHSyWVt2oAtad+rgBUl+8wHrkzjbQLm9G+qZmY2EBRRfpay9klqBG6NiAMz268HmtN1xBFlbMPkaJhTyX1LZlar/NVg+SS1RkRzef1oPj1rZmZWkVF5ejYi1gNZq8zUvnHQgjEzsxHDK00zM7NMTppmZmaZnDTNzMwyjcprmqPJlEn1tPiOOTOzAeGVppmZWSYnTTMzs0xOmmZmZpmcNM3MzDL5RqAa176hg8Z5t1U7DDPDH2NXC7zSNDMzy+SkaWZmlslJ08zMLJOTppmZWSYnTTMzs0xOmmZmZpmGXdKUNEHSGb20aZR0QkZfjZJW97D9FEnf6UucA7G/mZmNLMMuaQITgB6TJtAI9Jo0q0WS3/9qZlaDhmPSvADYR9JKSQvSY7WkdkmzS9ocntqcnVaUSyU9kh6HVTDenpKWSHpC0lc6KyX9o6TlaYzLJdWl+lMlPS5pOfDukvZXSbpM0kPARZKaJC2T1CbpJkm7pHbd1S+R9C1JLZIelfQOSTemuP49tdlR0m2SVqXXZDZmZjZkhmPSnAf8MiKagGVAE3AQ8H5ggaSG1GZpRDRFxLeAZ4EPRMQ0YDZwSQXjHQIcD0wFPiapWdLbUz/vTnFsAU5MY59HkSxnAPuX9fVm4LCI+DxwNfDFiJgKtAOdCbm7eoBXIqIZuAy4GTgTOBA4RdJuwIeA30bEQRFxIHBHVxOSNDcl35YtmzoqeCnMzKwnw/004gzguojYAjwj6T7gHcALZe3GAN+R1Jng/raCMe6OiOcAJN2YxnwNmA48LAlge4rE/E5gSUT8PrX/cdlYiyNii6R6YEJE3JfqFwGLu6sv2f+W9LMdWBMRT6dxngL2TPXflHQhcGtELO1qQhGxEFgIMLZhclTwWpiZWQ+Ge9LMdTbwDMWKdBvg5Qr2LU8qAQhYFBH/WrpB0rG99PWnCsbtyub0c2tJufP5thHxuKRpwFHAv0u6JyLO7+eYZmaWaTienn0R2CmVlwKzJdVJ2h2YCSwvawNQDzwdEVuBk4C6Csb7gKRdJW0PHAs8ANwDzJL0JoC0fS/gIeDvJO0maQzwsa46jIgO4HlJh6eqk4D7uqvPDVTS3wCbIuIaYAEwrYJ5mplZPw27lWZEPCfpgfRWkduBNmAVxQrwCxHxO0nPAVskrQKuAr4L3CDpZIrrfJWs+JYDN1Bcj7wmIloAJH0ZuEvSNsCrwJkRsUzSfOBBYCOwsod+5wCXSdoBeAo4tZf6HFMorutuTTGdXsG+ZmbWT4rwJa9aNrZhcjTMubjaYZgZ/mqwkURSa7ox8w2G4+lZMzOzYWnYnZ4dDJI+CFxYVr0uIo6rRjxmZjYyjYqkGRF3AndWOw4zMxvZfHrWzMws06hYaY5mUybV0+KbD8zMBoRXmmZmZpmcNM3MzDI5aZqZmWVy0jQzM8vkG4FqXPuGDhrn3VbtMMxsAPmTharHK00zM7NMTppmZmaZnDTNzMwyOWmamZllctI0MzPL5KRpZmaWyUnTzMwsU00nTUkTJJ3RS5tGSSdk9NUoafXARWdmZiNNTSdNYALQY9IEGoFek2YlJPlDI8zMalCtJ80LgH0krZS0ID1WS2qXNLukzeGpzdlpRblU0iPpcVjOQJJOkXSLpHuBeyTtKuknktokLZM0NbXrrn6+pEVp7F9J+qiki1Ksd0gak9pdIOkXaf9vdBPLXEktklq2bOro72toZmZJra+I5gEHRkSTpOOB04CDgInAw5LuT23OiYi/B5C0A/CBiHhZ0mTgOqA5c7xpwNSI+KOkbwMrIuJYSe8FrgaagPO6qQfYB3gPsD/wIHB8RHxB0k3ARyQtBY4D9ouIkDShqyAiYiGwEGBsw+TIfbHMzKxntb7SLDUDuC4itkTEM8B9wDu6aDcGuEJSO7CYIoHlujsi/lgy3g8AIuJeYDdJO/dQD3B7RLwKtAN1wB2pvp3iNHIH8DLw35I+CmyqIDYzM+un0ZQ0c50NPEOxIm0Gtqtg3z/1c+zNABGxFXg1IjpXiVuBbSPiNeAQ4Hrg73k9qZqZ2RCo9aT5IrBTKi8FZkuqk7Q7MBNYXtYGoB54OiWukyhWfH2xFDgRQNIRwB8i4oUe6nslaTxQHxE/pUjuB/UxNjMz64OavqYZEc9JeiC9VeR2oA1YBQTwhYj4naTngC2SVgFXAd8FbpB0MsVKrq+rx/nAlZLaKE6jzumlPsdOwM2SxgECPt/H2MzMrA/0+hlAq0VjGyZHw5yLqx2GmQ0gf5/m4JPUGhF/dRNorZ+eNTMzGzA1fXp2MEj6IHBhWfW6iDiuGvGYmdnQcdKsUETcCdxZ7TjMzGzoOWnWuCmT6mnx9Q8zswHha5pmZmaZnDTNzMwyOWmamZllctI0MzPL5BuBalz7hg4a591W7TDMhpTf/G+DxStNMzOzTE6aZmZmmZw0zczMMjlpmpmZZXLSNDMzy+SkaWZmlslJ08zMLFOvSVNSo6TVgxWApJ8PVt/9VTp3Sc2SLql2TGZmVj1V/3CDiDis2jHkiIgWoKXacZiZWfXknp6tk3SFpDWS7pK0vaQmScsktUm6SdIuAJKWSGpO5YmS1qfyAZKWS1qZ9pmc6l9KP49I+14v6TFJ10pS2nZUqmuVdImkW7sLVNJ8SYskLZX0K0kflXSRpHZJd0gak9pNl3Rf6vNOSQ0l9askrQLOLOn3iM5xJR0i6UFJKyT9XNK+qf4USTemcZ6QdFFPL6qk70lqSa/reSX1Xc5X0o6Srkyv4wpJx3TT79zUb8uWTR09hWBmZhXITZqTgUsj4gBgI3A8cDXwxYiYCrQDX+mlj9OA/4qIJqAZ+E0XbQ4GPgfsD+wNvFvSOOBy4MMRMR3YPSPefYD3AkcD1wA/i4gpwJ+Bj6TE+W1gVurzSuBrad/vA2dFxEE99P8YcHhEHAz8G/AfJduagNnAFGC2pD176OfciGgGpgJ/J2lqL/M9F7g3Ig4B3gMskLRjeacRsTAimiOiuW6H+h6GNzOzSuSenl0XEStTuZUiKU2IiPtS3SJgcS99PAicK+nNwI0R8UQXbZZHxG8AJK0EGoGXgKciYl1qcx0wt5exbo+IVyW1A3XAHam+PfW5L3AgcHdazNYBT0uakOZ1f2r/A+DDXfRfDyxKq+UAxpRsuyciOtIcfgHsBfy6mzg/LmkuxXFooPjPwj
Y9zPdI4GhJ56Tn44C3AI/2/HKYmdlAyE2am0vKW4AJPbR9jddXsOM6KyPih5IeAj4C/FTSP0fEvb2M09drrpvTmFslvRoRkeq3pj4FrImIQ0t3Skkzx1cpVq/HSWoElpSPnXQ7B0lvBc4B3hERz0u6ipLXqxsCjo+ItZlxmpnZAOrrW046gOclHZ6enwR0rjrXA9NTeVbnDpL2plhBXQLcTHFKMsdaYO+UnKA49dlfa4HdJR2aYhsj6YCI2AhslDQjtTuxm/3rgQ2pfEofY9gZ+BPQIWkPXl/R9jTfO4GzSq71HtzHsc3MrA/68z7NORTX1NooruOdn+q/AZwuaQUwsaT9x4HV6bTrgRTXRHsVEX8GzgDukNQKvEiRtPssIl6hSOgXpht+VgKdd/GeClya4lQ3XVwEfD3NsU+r4YhYBayguD76Q+CBVN/TfL9KcSq4TdKa9NzMzIaIXj9zOXxJGh8RL6UV1qXAExHxrWrHNVgGcr5jGyZHw5yLBzZAs2HO36dp/SWpNd2o+QYj5ROBPpVWfmsoTo1eXuV4Bttom6+Z2YhQ9Q83yJFWWW9YaUk6FfhsWdMHIuJMhpl0A9TYsuqTIqK9q/ZdzdfMzKpvRCTNrkTE9yneUznsRcQ7qx2DmZn130g5PWtmZlZ1I3alaXmmTKqnxTdFmJkNCK80zczMMjlpmpmZZXLSNDMzy+SkaWZmlsk3AtW49g0dNM67rdphmNkQ8CchDT6vNM3MzDI5aZqZmWVy0jQzM8vkpGlmZpbJSdPMzCyTk6aZmVkmJ00zM7NMNZs0JS2R1JzKP5U0YQD7Pk3SyQPVn5mZjQyj4sMNIuKoAe7vsoHsz8zMRoZhtdKU1CjpMUlXSXpc0rWS3i/pAUlPSDpE0o6SrpS0XNIKScekfbeX9CNJj0q6Cdi+pN/1kiam8k8ktUpaI2luSZuXJH1N0ipJyyTt0UOc8yWdk8pLJF2Y4nlc0uGpvk7SNyStltQm6axU/74Ud3uax9iSGL8uaaWkFknTJN0p6ZeSTisZ+18kPZz6PK+b+OamPlq2bOroxxExM7NSwyppJm8Dvgnslx4nADOAc4AvAecC90bEIcB7gAWSdgROBzZFxNuBrwDTu+n/kxExHWgGPiNpt1S/I7AsIg4C7gc+VUHM26Z4PpfGBpgLNAJNETEVuFbSOOAqYHZETKFY6Z9e0s//REQTsDS1mwW8CzgPQNKRwGTgEKAJmC5pZnkwEbEwIpojorluh/oKpmFmZj0ZjklzXUS0R8RWYA1wT0QE0E6RhI4E5klaCSwBxgFvAWYC1wBERBvQ1k3/n5G0ClgG7EmRhABeAW5N5dY0Vq4bu9jv/cDlEfFaiumPwL5pfo+nNotS3J1uST/bgYci4sWI+D2wOV2TPTI9VgCPUPynYjJmZjYkhuM1zc0l5a0lz7dSxLsFOD4i1pbuJKnXjiUdQZHMDo2ITZKWUCRdgFdTciaNUclr0xljpft110/pvDufbwsI+HpEXN6PMczMrI+G40qzN3cCZyllSUkHp/r7KU7lIulAYGoX+9YDz6eEuR/Fqc/Bcjfwz5K2TTHtCqwFGiW9LbU5Cbivgj7vBD4paXzqc5KkNw1gzGZm1oORmDS/CowB2iStSc8BvgeMl/QocD7FqdJydwDbpjYXUJyiHSz/G/ifFOcq4ISIeBk4FVgsqZ1iBZl9J25E3AX8EHgw7X89sNOAR25mZl3S62ckrRaNbZgcDXMurnYYZjYE/H2aA0dSa0Q0l9ePxJWmmZlZVQzHG4GGDUnnAh8rq14cEV+rRjxmZlZdTpo9SMnRCdLMzAAnzZo3ZVI9Lb7OYWY2IHxN08zMLJOTppmZWSYnTTMzs0xOmmZmZpmcNM3MzDI5aZqZmWVy0jQzM8vkpGlmZpbJSdPMzCyTv+Wkxkl6keJ7PGvJROAP1Q5iANXafKD25lRr8wHPqTd7RcTu5ZX+GL3at7arr7cZySS11NKcam0+UHtzqrX5gOfUVz49a2ZmlslJ08zMLJOTZu1bWO0ABkGtzanW5gO1N6damw94Tn3iG4HMzMwyeaVpZmaWyUnTzMwsk5NmjZL0IUlrJT0paV614+mNpPWS2iWtlNSS6naVdLekJ9LPXVK9JF2S5tYmaVpJP3NS+yckzRniOVwp6VlJq0vqBmwOkqan1+jJtK+qMJ/5kjak47RS0lEl2/41xbZW0gdL6rv8XZT0VkkPpfofS9pukOezp6SfSfqFpDWSPpvqR/Ix6m5OI/k4jZO0XNKqNKfzeopD0tj0/Mm0vbGvc80SEX7U2AOoA34J7A1sB6wC9q92XL3EvB6YWFZ3ETAvlecBF6byUcDtgIB3AQ+l+l2Bp9LPXVJ5lyGcw0xgGrB6MOYALE9tlfb9cBXmMx84p4u2+6ffs7HAW9PvX11Pv4vA/wE+kcqXAacP8nwagGmpvBPweIp7JB+j7uY0ko+TgPGpPAZ4KL2mXcYBnAFclsqfAH7c17nmPLzSrE2HAE9GxFMR8QrwI+CYKsfUF8cAi1J5EXBsSf3VUVgGTJDUAHwQuDsi/hgRzwN3Ax8aqmAj4n7gj2XVAzKHtG3niFgWxV+Eq0v6Gsr5dOcY4EcRsTki1gFPUvwedvm7mFZg7wWuT/uXvjaDIiKejohHUvlF4FFgEiP7GHU3p+6MhOMUEfFSejomPaKHOEqP3/XA+1LcFc01Nz4nzdo0Cfh1yfPf0PM/pOEggLsktUqam+r2iIinU/l3wB6p3N38huO8B2oOk1K5vL4aPp1OV17ZeSqTyuezG7AxIl4rqx8S6RTewRSrmJo4RmVzghF8nCTVSVoJPEvxn5Jf9hDHX2JP2ztS3IPyd8JJ04aLGRExDfgwcKakmaUb0//cR/T7o2phDsD3gH2AJuBp4JvVDadyksYDNwCfi4gXSreN1GPUxZxG9HGKiC0R0QS8mWJluF+VQ/oLJ83atAHYs+T5m1PdsBURG9LPZ4GbKP6hPJNOeZF+Ppuadze/4TjvgZrDhlQurx9SEfFM+oO2FbiC4jhB5fN5juJ057Zl9YNK0hiK5HJtRNyYqkf0MepqTiP9OHWKiI3Az4BDe4jjL7Gn7fUp7kH5O+GkWZseBianu822o7g4fkuVY+qWpB0l7dRZBo4EVlPE3Hln4hzg5lS+BTg53d34LqAjnV67EzhS0i7pdNSRqa6aBmQOadsLkt6VrtecXNLXkOlMLslxFMcJivl8It3J+FZgMsVNMV3+LqYV3c+AWWn/0tdmsGIX8N/AoxHxnyWbRuwx6m5OI/w47S5pQipvD3yA4lptd3GUHr9ZwL0p7ormmh3gQNzt5Mfwe1Dc+fc4xbWAc6sdTy+x7k1xB9sqYE1nvBTXJe4BngD+L7BrqhdwaZpbO9Bc0tcnKS74PwmcOsTzuI7iVNirFNdJ/mkg5wA0U/zx+yXwHdIneg3xfH6Q4m1Lf2gaStqfm2JbS8ldo939LqbjvjzNczEwdpDnM4Pi1GsbsDI9jhrhx6i7OY3k4zQVWJFiXw38W09xAOPS8yfT9r37Otechz9Gz8zMLJNPz
5qZmWVy0jQzM8vkpGlmZpbJSdPMzCyTk6aZmVkmJ00zM7NMTppmZmaZ/j9lo9nx8XrxIQAAAABJRU5ErkJggg=="
+ ]
},
"metadata": {
"needs_background": "light"
@@ -388,8 +351,8 @@
"output_type": "stream",
"name": "stdout",
"text": [
- "Predicted labels [144012.37488361 251501.98425004 147503.3849682 ... 219178.01297482\n",
- " 213834.88677304 272956.11149784]\n",
+ "Predicted labels [143391.65036598 245535.13731975 153171.44071644 ... 184354.52735665\n",
+ " 235510.49470402 282617.22858849]\n",
"True labels 14740 136900.0\n",
"10101 241300.0\n",
"20566 200700.0\n",
@@ -427,9 +390,9 @@
"output_type": "stream",
"name": "stdout",
"text": [
- "r2 = 0.8540590968156087\n",
- "mse = 1929120783.4023921\n",
- "mae = 28944.167002684408\n"
+ "r2 = 0.8505434326525669\n",
+ "mse = 1975592613.1389656\n",
+ "mae = 29471.536046101864\n"
]
}
],
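The small drop in test r2 (0.8505 vs 0.8541) tracks the validation numbers above. The scores can be reproduced with standard scikit-learn metrics; a sketch assuming `y_test` and the `y_pred` from the prediction cell:

```python
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error

print("r2 =", r2_score(y_test, y_pred))
print("mse =", mean_squared_error(y_test, y_pred))
print("mae =", mean_absolute_error(y_test, y_pred))
```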
@@ -457,15 +420,13 @@
"name": "stdout",
"text": [
"{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 4, 'num_leaves': 4, 'min_child_samples': 20, 'learning_rate': 0.09999999999999995, 'log_max_bin': 8, 'colsample_bytree': 1.0, 'reg_alpha': 0.0009765625, 'reg_lambda': 1.0}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 4, 'num_leaves': 4, 'min_child_samples': 20, 'learning_rate': 0.09999999999999995, 'log_max_bin': 8, 'colsample_bytree': 1.0, 'reg_alpha': 0.0009765625, 'reg_lambda': 1.0}}\n",
- "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 4, 'num_leaves': 4, 'min_child_samples': 12, 'learning_rate': 0.26770501231052046, 'log_max_bin': 7, 'colsample_bytree': 1.0, 'reg_alpha': 0.001348364934537134, 'reg_lambda': 1.4442580148221913}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 4, 'num_leaves': 4, 'min_child_samples': 12, 'learning_rate': 0.26770501231052046, 'log_max_bin': 7, 'colsample_bytree': 1.0, 'reg_alpha': 0.001348364934537134, 'reg_lambda': 1.4442580148221913}}\n",
- "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 11, 'num_leaves': 4, 'min_child_samples': 9, 'learning_rate': 0.7260594590615893, 'log_max_bin': 9, 'colsample_bytree': 0.9285002286474459, 'reg_alpha': 0.0036840681931986645, 'reg_lambda': 0.7532480505730402}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 11, 'num_leaves': 4, 'min_child_samples': 9, 'learning_rate': 0.7260594590615893, 'log_max_bin': 9, 'colsample_bytree': 0.9285002286474459, 'reg_alpha': 0.0036840681931986645, 'reg_lambda': 0.7532480505730402}}\n",
- "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 13, 'num_leaves': 5, 'min_child_samples': 5, 'learning_rate': 0.7590459488450945, 'log_max_bin': 8, 'colsample_bytree': 0.8304072431299575, 'reg_alpha': 0.001951378031519758, 'reg_lambda': 0.04792552866398477}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 13, 'num_leaves': 5, 'min_child_samples': 5, 'learning_rate': 0.7590459488450945, 'log_max_bin': 8, 'colsample_bytree': 0.8304072431299575, 'reg_alpha': 0.001951378031519758, 'reg_lambda': 0.04792552866398477}}\n",
- "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 44, 'num_leaves': 4, 'min_child_samples': 4, 'learning_rate': 0.41929025492645006, 'log_max_bin': 8, 'colsample_bytree': 0.7610534336273627, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.009280655005879927}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 44, 'num_leaves': 4, 'min_child_samples': 4, 'learning_rate': 0.41929025492645006, 'log_max_bin': 8, 'colsample_bytree': 0.7610534336273627, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.009280655005879927}}\n",
- "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 141, 'num_leaves': 17, 'min_child_samples': 3, 'learning_rate': 0.17402065726724145, 'log_max_bin': 8, 'colsample_bytree': 0.6649148062238498, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.006761362450996487}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 141, 'num_leaves': 17, 'min_child_samples': 3, 'learning_rate': 0.17402065726724145, 'log_max_bin': 8, 'colsample_bytree': 0.6649148062238498, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.006761362450996487}}\n",
- "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 88, 'num_leaves': 70, 'min_child_samples': 4, 'learning_rate': 0.09348689572544734, 'log_max_bin': 7, 'colsample_bytree': 0.5967846088487322, 'reg_alpha': 0.006958608037974516, 'reg_lambda': 0.001895876878997586}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 88, 'num_leaves': 70, 'min_child_samples': 4, 'learning_rate': 0.09348689572544734, 'log_max_bin': 7, 'colsample_bytree': 0.5967846088487322, 'reg_alpha': 0.006958608037974516, 'reg_lambda': 0.001895876878997586}}\n",
- "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 166, 'num_leaves': 34, 'min_child_samples': 2, 'learning_rate': 0.11549142333280608, 'log_max_bin': 8, 'colsample_bytree': 0.6469726212777197, 'reg_alpha': 0.032619809462956464, 'reg_lambda': 0.00406523645285879}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 166, 'num_leaves': 34, 'min_child_samples': 2, 'learning_rate': 0.11549142333280608, 'log_max_bin': 8, 'colsample_bytree': 0.6469726212777197, 'reg_alpha': 0.032619809462956464, 'reg_lambda': 0.00406523645285879}}\n",
- "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 108, 'num_leaves': 169, 'min_child_samples': 2, 'learning_rate': 0.07154128424526202, 'log_max_bin': 9, 'colsample_bytree': 0.591579264701285, 'reg_alpha': 0.01435520144866301, 'reg_lambda': 0.006874802748054271}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 108, 'num_leaves': 169, 'min_child_samples': 2, 'learning_rate': 0.07154128424526202, 'log_max_bin': 9, 'colsample_bytree': 0.591579264701285, 'reg_alpha': 0.01435520144866301, 'reg_lambda': 0.006874802748054271}}\n",
- "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 256, 'num_leaves': 79, 'min_child_samples': 4, 'learning_rate': 0.06020420143131026, 'log_max_bin': 10, 'colsample_bytree': 0.6501336877031868, 'reg_alpha': 0.11324823332770402, 'reg_lambda': 0.007122448821650475}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 256, 'num_leaves': 79, 'min_child_samples': 4, 'learning_rate': 0.06020420143131026, 'log_max_bin': 10, 'colsample_bytree': 0.6501336877031868, 'reg_alpha': 0.11324823332770402, 'reg_lambda': 0.007122448821650475}}\n"
+ "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 4, 'num_leaves': 12, 'min_child_samples': 15, 'learning_rate': 0.2284139062380884, 'log_max_bin': 9, 'colsample_bytree': 1.0, 'reg_alpha': 0.0014700173967242716, 'reg_lambda': 7.624911621832711}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 4, 'num_leaves': 12, 'min_child_samples': 15, 'learning_rate': 0.2284139062380884, 'log_max_bin': 9, 'colsample_bytree': 1.0, 'reg_alpha': 0.0014700173967242716, 'reg_lambda': 7.624911621832711}}\n",
+ "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 4, 'num_leaves': 21, 'min_child_samples': 12, 'learning_rate': 0.5082200481556807, 'log_max_bin': 8, 'colsample_bytree': 0.9696263001275751, 'reg_alpha': 0.0028107036379524425, 'reg_lambda': 3.716898117989413}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 4, 'num_leaves': 21, 'min_child_samples': 12, 'learning_rate': 0.5082200481556807, 'log_max_bin': 8, 'colsample_bytree': 0.9696263001275751, 'reg_alpha': 0.0028107036379524425, 'reg_lambda': 3.716898117989413}}\n",
+ "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 20, 'num_leaves': 12, 'min_child_samples': 15, 'learning_rate': 0.2284139062380884, 'log_max_bin': 9, 'colsample_bytree': 1.0, 'reg_alpha': 0.0014700173967242718, 'reg_lambda': 7.624911621832699}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 20, 'num_leaves': 12, 'min_child_samples': 15, 'learning_rate': 0.2284139062380884, 'log_max_bin': 9, 'colsample_bytree': 1.0, 'reg_alpha': 0.0014700173967242718, 'reg_lambda': 7.624911621832699}}\n",
+ "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 78, 'num_leaves': 10, 'min_child_samples': 24, 'learning_rate': 0.07647794276357107, 'log_max_bin': 10, 'colsample_bytree': 1.0, 'reg_alpha': 0.001749539645587163, 'reg_lambda': 4.373760956394571}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 78, 'num_leaves': 10, 'min_child_samples': 24, 'learning_rate': 0.07647794276357107, 'log_max_bin': 10, 'colsample_bytree': 1.0, 'reg_alpha': 0.001749539645587163, 'reg_lambda': 4.373760956394571}}\n",
+ "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 171, 'num_leaves': 15, 'min_child_samples': 49, 'learning_rate': 0.09991937598563264, 'log_max_bin': 9, 'colsample_bytree': 1.0, 'reg_alpha': 0.0009940547384005775, 'reg_lambda': 1.1214041135390789}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 171, 'num_leaves': 15, 'min_child_samples': 49, 'learning_rate': 0.09991937598563264, 'log_max_bin': 9, 'colsample_bytree': 1.0, 'reg_alpha': 0.0009940547384005775, 'reg_lambda': 1.1214041135390789}}\n",
+ "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 230, 'num_leaves': 61, 'min_child_samples': 58, 'learning_rate': 0.11237861230007634, 'log_max_bin': 9, 'colsample_bytree': 0.9596144262255549, 'reg_alpha': 0.0009765625, 'reg_lambda': 20.911712312854934}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 230, 'num_leaves': 61, 'min_child_samples': 58, 'learning_rate': 0.11237861230007634, 'log_max_bin': 9, 'colsample_bytree': 0.9596144262255549, 'reg_alpha': 0.0009765625, 'reg_lambda': 20.911712312854934}}\n",
+ "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 363, 'num_leaves': 216, 'min_child_samples': 42, 'learning_rate': 0.09100963138990395, 'log_max_bin': 8, 'colsample_bytree': 0.8025848209352517, 'reg_alpha': 0.001113000336715291, 'reg_lambda': 76.50614276906414}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 363, 'num_leaves': 216, 'min_child_samples': 42, 'learning_rate': 0.09100963138990395, 'log_max_bin': 8, 'colsample_bytree': 0.8025848209352517, 'reg_alpha': 0.001113000336715291, 'reg_lambda': 76.50614276906414}}\n"
]
}
],
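The per-iteration history above is read back from the training log rather than kept in memory; flaml's `get_output_from_log` helper returns it along with the series behind the figure that follows (a learning curve in the original notebook). A sketch, assuming the log filename and time budget match the fit settings:

```python
import numpy as np
import matplotlib.pyplot as plt
from flaml.data import get_output_from_log

time_history, best_valid_loss_history, valid_loss_history, config_history, metric_history = \
    get_output_from_log(filename="lgbm.log", time_budget=240)  # assumed settings

plt.title("Learning Curve")
plt.xlabel("Wall Clock Time (s)")
plt.ylabel("Validation r2")
plt.step(time_history, 1 - np.array(best_valid_loss_history), where="post")
plt.show()
```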
@@ -493,11 +454,10 @@
{
"output_type": "display_data",
"data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYIAAAEWCAYAAABrDZDcAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjAsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8GearUAAAc/UlEQVR4nO3df7xVdZ3v8debI8rx55FABw4glEj5oyTJruWUOjmgU4JpjnpnbtotdCadRr2UNGam15tF2bXHJR10TO2qiIaIRjFO/qj8BSgqguHgj4SDP1DEX51E4DN/rHVosd1ns4Gz9t5nr/fz8diPs9d3fddan73E/dnf73et71JEYGZmxdWn3gGYmVl9ORGYmRWcE4GZWcE5EZiZFZwTgZlZwTkRmJkVnBOBWQWS/lLS0nrHYZYnJwJrWJKek/SZesYQEb+NiFF57V/SWEm/kfSmpFWS7pV0TF7HMyvHicAKTVJLHY99PHAzcB0wBNgTOB/43FbsS5L8/7NtFf/DsV5HUh9J50p6WtKrkmZI6p9Zf7OkFyW9nv7a3i+z7hpJl0uaI+lt4PC05fG/JD2ebnOTpH5p/cMkrchs323ddP3XJb0gaaWkL0sKSXuX+QwCLgUuioirIuL1iNgQEfdGxFfSOhdI+v+ZbYan+9suXb5H0sWS7gP+CEyStKDkOGdJmp2+30HSDyQ9L+klSVdIat3G/xzWBJwIrDc6E5gAfBoYDLwGTM2s/yUwEtgDeAS4vmT7k4GLgV2A36VlJwDjgBHAh4FTKhy/bF1J44Czgc8AewOHVdjHKGAocEuFOtX4e2AiyWe5AhglaWRm/cnADen7S4B9gAPT+NpJWiBWcE4E1hudDvxLRKyIiHeAC4Dju34pR8TVEfFmZt1HJO2W2f62iLgv/QX+p7TsxxGxMiJWA7eTfFl2p7u6JwA/jYjFEfHH9NjdeV/694VqP3Q3rkmPty4iXgduA04CSBPCB4HZaQtkInBWRKyOiDeB/wOcuI3HtybgRGC90V7ArZLWSFoDPAmsB/aU1CLpkrTb6A3guXSbAZntl5fZ54uZ938Edq5w/O7qDi7Zd7njdHk1/TuoQp1qlB7jBtJEQNIamJUmpYHAjsDDmfP2q7TcCs6JwHqj5cBREdGWefWLiA6SL7/xJN0zuwHD022U2T6vKXdfIBn07TK0Qt2lJJ/juAp13ib58u7yF2XqlH6WO4GBkg4kSQhd3UKvAJ3AfplztltEVEp4VhBOBNbo+krql3ltR9IXfrGkvQAkDZQ0Pq2/C/AOyS/uHUm6P2plBnCqpA9J2hH4VncVI5n//WzgW5JOlbRrOgh+qKRpabVHgU9JGpZ2bU3eXAAR8S7JlUhTgP4kiYGI2ABcCfxI0h4Aktoljd3qT2tNw4nAGt0ckl+yXa8LgMuA2cC/S3oTeBD4eFr/OuAPQAewJF1XExHxS+DHwN3Assyx3+mm/i3A3wJfAlYCLwH/m6Sfn4i4E7gJeBx4GLijylBuIGkR3RwR6zLl3+iKK+02+w+SQWsrOPnBNGb5kPQh4Algh5IvZLOG4haBWQ+SdGx6vf7uwPeA250ErNE5EZj1rNOAl4GnSa5k+of6hmO2ee4aMjMrOLcIzMwKbrt6B7ClBgwYEMOHD693GGZmvcrDDz/8SkSUvYGw1yWC4cOHs2DBgs1XNDOzjST9obt17hoyMys4JwIzs4JzIjAzKzgnAjOzgnMiMDMruF531ZCZWdHMWtjBlLlLWbmmk8FtrUwaO4oJo9t7bP9OBGZmDWzWwg4mz1xE57vrAehY08nkmYsAeiwZOBEUXN6/NMxs20yZu3RjEujS+e56psxd6kRg264WvzTMbNusXNO5ReVbw4mgwLr7pfH1Wx7nxnnP1ykqM8vq29KHtes3vKd8cFtrjx3DVw0VWHe/KMr9ozOz+hjav5U+2rSstW8Lk8b23MPl3CIosMFtrXSUSQbtba3cdNohdYjIzMrxVUNNohEHZSeNHbXJGAH0/C8NM9t2E0a35/p94URQA406KNt17K/f8jhr12+gvUESlJnVlhNBDTT6oOwOffswelibu4PMCsqDxTXQ6IOy+w7alfEHuhVgVlRuEdSAB2XNrJG5RVADk8aOorVvyyZlHpQ1s0bhFkENeFDWzBqZE0GNTBjdvnFg2N1BZtZIcu0akjRO0lJJyySdW2b9MEl3S1oo6XFJR+cZj5mZvVduiUBSCzAVOArYFzhJ0r4l1c4DZkTEaOBE4Cd5xWNmZuXl2SI4GFgWEc9ExFpgOjC+pE4Au6bvdwNW5hiPmZmVkWciaAeWZ5ZXpGVZFwB/J2kFMAc4s9yOJE2UtEDSglWrVuURq5lZYdX78tGTgGsiYghwNPAzSe+JKSKmRcSYiBgzcODAmgdpZtbM8kwEHcDQzPKQtCzrfwIzACLiAaAfMCDHmMzMrESeiWA+MFLSCEnbkwwGzy6p8zzwVwCSPkSSCNz3Y2ZWQ7ndRxAR6ySdAcwFWoCrI2KxpAuBBRExGzgHuFLSWSQDx6dEROQV07ZoxGmkzcx6Qq43lEXEHJJB4GzZ+Zn3S4BP5hlDT2jUaaTNzHqC7yyuQk9NI73khTfYd9Cum69oZlZD9b5qqFfoqWmkPd2zmTUitwiq4GmkzayZuUVAMgbwyUvuYsS5v+CTl9zFrIWbXuXqaaTNrJkVvkVQzUCwp5E2s2ZW+ESwJQPBfravmTWjwncNbclAsAd7zawZFb5F4IFgMyu6wrcIPBBsZkVX+BaBB4LNrOgKnwjAzxM2s2IrfNeQmVnRORGYmRWcE4GZWcE5EZiZFZwTgZlZwTkRmJkVnBOBmVnBORGYmRWcE4GZWcE5EZiZFZwTgZlZwTkRmJkVnBOBmVnBORGYmRVcrtNQSxoHXAa0AFdFxCUl638EHJ4u7gjsERFtecbUZdbCDqbMXcrKNZ0MbmulX98+DNh5h1oc2sysoeSWCCS1AFOBI4EVwHxJsyNiSVediDgrU/9MYHRe8WTNWtjB5JmLNj60vmNNJ31UiyObmTWePLuGDgaWRcQzEbEWmA6Mr1D/JODGHOPZaMrcpRuTQJcNActXl3+QvZlZM8szEbQDyzPLK9Ky95C0FzACuKub9RMlLZC0YNWqVdsc2MoyD6sHWLt+wzbv28yst2mUweITgVsiYn25lRExLSLGRMSYgQMHbvPBBre1li1v76bczKyZ5ZkIOoChmeUhaVk5J1KjbiGASWNH0dq3ZZOy1r4tTBo7qlYhmJk1jDwTwXxgpKQRkrYn+bKfXVpJ0geB3YEHcoxlExNGt/Pdzx/A9i3Jx29va+W7nz+ACaPL9lyZmTW13K4aioh1ks4A5pJcPnp1RCyWdCGwICK6ksKJwPSIiLxiKWfC6HZunPc8ADeddkgtD21m1lByvY8gIuYAc0rKzi9ZviDPGMzMrLJGGSw2M7M6cSIwMys4JwIzs4JzIjAzKzgnAjOzgnMiMDMrOCcCM7OCcyIwMys4JwIzs4JzIjAzKzgnAjOzgnMiMDM
rOCcCM7OCcyIwMys4JwIzs4KrmAgk7SrpA2XKP5xfSGZmVkvdJgJJJwC/B34uabGkj2VWX5N3YGZmVhuVWgTfBA6KiAOBU4GfSTo2XafcIzMzs5qo9KjKloh4ASAi5kk6HLhD0lCgps8XNjOz/FRqEbyZHR9Ik8JhwHhgv5zjMjOzGqnUIvgHSrqAIuJNSeOAE3KNyszMaqbbFkFEPAY8K+nukvJ3I+L63CMzM7OaqHj5aESsBzZI2q1G8ZiZWY1V6hrq8hawSNKdwNtdhRHxT7lFZWZmNVNNIpiZvszMrAltNhFExLVbu/N0YPkyoAW4KiIuKVPnBOACkktSH4uIk7f2eGZmtuWqaRFsFUktwFTgSGAFMF/S7IhYkqkzEpgMfDIiXpO0R17xmJlZeXlOOncwsCwinomItcB0knsQsr4CTI2I1wAi4uUc4zEzszLyTATtwPLM8oq0LGsfYB9J90l6MO1Keg9JEyUtkLRg1apVOYVrZlZMm+0akrQPMAnYK1s/Io7ooeOPJLljeQjwG0kHRMSabKWImAZMAxgzZoyntzAz60HVjBHcDFwBXAms34J9dwBDM8tD0rKsFcBDEfEuyc1rT5EkhvlbcBwzM9sG1SSCdRFx+Vbsez4wUtIIkgRwIlB6RdAs4CTgp5IGkHQVPbMVxzIzs61UzRjB7ZL+UdIgSf27XpvbKCLWAWcAc4EngRkRsVjShZKOSavNBV6VtAS4G5gUEa9u5WcxM7OtUE2L4Ivp30mZsgDev7kNI2IOMKek7PzM+wDOTl9mZlYH1dxQNqIWgZiZWX1Uc9VQX5IpqT+VFt0D/Gs6wGtmZr1cNV1DlwN9gZ+ky3+fln05r6DMzKx2qkkEH4uIj2SW75L0WF4BmZlZbVVz1dD67CMrJb2fLbufwMzMGlg1LYJJwN2SniF5dOVewKm5RmVmZjVTzVVDv05nCR2VFi2NiHfyDcvMzGql20Qg6YiIuEvS50tW7S2JiPDDaszMmkClFsGngbuAz5VZF/ipZWZmTaHbRBAR307fXhgRz2bXpfMHmZlZE6jmqqGflym7pacDMTOz+qg0RvBBYD9gt5Jxgl2BfnkHZmZmtVFpjGAU8FmgjU3HCd4kecSkmZk1gUpjBLcBt0k6JCIeqGFMZmZWQ9XcULZQ0ldJuok2dglFxJdyi8rMzGqmmsHinwF/AYwF7iV55OSbeQZlZma1U00i2DsivgW8HRHXAn8DfDzfsMzMrFaq6Rrqeu7AGkn7Ay8Ce+QXUn5mLexgytylrFzTyeC2Vvr17cOAnXeod1hmZnVVTSKYJml34FvAbGBn4PzKmzSeWQs7mDxzEZ3vJhOndqzppI/qHJSZWQOoZtK5q9K391LFc4ob1ZS5SzcmgS4bApav7qxTRGZmjaHSDWUVHygfEZf2fDj5Wbmm/Bf+2vUbahyJmVljqdQi2CX9Owr4GEm3ECQ3l83LM6g8DG5rpaNMMmhva61DNGZmjaPbq4Yi4jsR8R2Sy0U/GhHnRMQ5wEHAsFoF2FMmjR1Fa9+WTcpa+7YwaeyobrYwMyuGagaL9wTWZpbXpmW9yoTR7QB8/ZbHWbt+A+1trUwaO2pjuZlZUVWTCK4D5km6NV2eAFyTW0Q5mjC6nRvnPQ/ATacdUudozMwaw2ZvKIuIi0meUfxa+jo1Ir5bzc4ljZO0VNIySeeWWX+KpFWSHk1fX97SD2BmZtum0lVDu0bEG5L6A8+lr651/SNidaUdS2oBpgJHAiuA+ZJmR8SSkqo3RcQZWxm/mZlto0pdQzeQTEP9MMmjKbsoXd7cPQUHA8si4hkASdOB8UBpIjAzszqqNA31Z9O/W/tYynZgeWZ5BeXnKDpO0qeAp4CzImJ5aQVJE4GJAMOG9boLlszMGlqlrqGPVtowIh7pgePfDtwYEe9IOg24FjiizLGmAdMAxowZE6Xrzcxs61XqGvphhXVBmS/sEh3A0MzykLTszzuJeDWzeBXw/c3s08zMelilrqHDt3Hf84GRkkaQJIATgZOzFSQNiogX0sVjgCe38ZhmZraFqrmPgHT66X3Z9All11XaJiLWSToDmAu0AFdHxGJJFwILImI28E+SjgHWAauBU7bqU5iZ2VbbbCKQ9G3gMJJEMAc4CvgdyY1mFUXEnHSbbNn5mfeTgclbFLGZmfWoap5QdjzwV8CLEXEq8BFgt1yjMjOzmqkmEXRGxAZgnaRdgZfZdBDYzMx6sWrGCBZIagOuJLm57C3ggVyjMjOzmql0H8FU4IaI+Me06ApJvwJ2jYjHaxKdmZnlrlKL4CngB5IGATNIbvxaWJuwzMysVio9mOayiDgE+DTwKnC1pN9L+rakfWoWoZmZ5aqaaaj/EBHfi4jRwEkkzyPwjV9mZk1is4lA0naSPifpeuCXwFLg87lHZmZmNVFpsPhIkhbA0SQPq58OTIyIt2sUm5mZ1UClweLJJM8kOCciXqtRPGZmVmOVJp3b3OyiZmbWBKq5s9jMzJqYE4GZWcE5EZiZFZwTgZlZwTkRmJkVnBOBmVnBORGYmRWcE4GZWcE5EZiZFZwTgZlZwTkRmJkVnBOBmVnBORGYmRWcE4GZWcHlmggkjZO0VNIySedWqHecpJA0Js94zMzsvXJLBJJagKnAUcC+wEmS9i1Tbxfga8BDecViZmbdy7NFcDCwLCKeiYi1JI+6HF+m3kXA94A/5RiLmZl1I89E0A4szyyvSMs2kvRRYGhE/KLSjiRNlLRA0oJVq1b1fKRmZgVWt8FiSX2AS4FzNlc3IqZFxJiIGDNw4MD8gzMzK5A8E0EHMDSzPCQt67ILsD9wj6TngP8GzPaAsZlZbeWZCOYDIyWNkLQ9cCIwu2tlRLweEQMiYnhEDAceBI6JiAU5xmRmZiVySwQRsQ44A5gLPAnMiIjFki6UdExexzUzsy2zXZ47j4g5wJySsvO7qXtYnrGYmVl5vrPYzKzgnAjMzArOicDMrOCcCMzMCs6JwMys4JwIzMwKzonAzKzgnAjMzArOicDMrOCcCMzMCs6JwMys4JwIzMwKzonAzKzgnAjMzArOicDMrOCcCMzMCs6JwMys4JwIzMwKzonAzKzgnAjMzArOicDMrOCcCMzMCs6JwMys4JwIzMwKzonAzKzgck0EksZJWippmaRzy6w/XdIiSY9K+p2kffOMx8zM3iu3RCCpBZgKHAXsC5xU5ov+hog4ICIOBL4PXJpXPGZmVl6eLYKDgWUR8UxErAWmA+OzFSLijcziTkDkGI+ZmZWxXY77bgeWZ5ZXAB8vrSTpq8DZwPbAEeV2JGkiMBFg2LBhPR6omVmR1X2wOCKmRsQHgG8A53VTZ1pEjImIMQMHDqxtgGZmTS7PRNABDM0sD0nLujMdmJBjPGZmVkaeiWA+MFLSCEnbAycCs7MVJI3MLP4N8J85xmNmZmXkNkYQEesknQHMBVqAqyNisaQLgQURMRs4Q9JngHeB14Av5hWPmZmVl+dgMRExB5hTUnZ+5v3X8jy+mZltXt0Hi83MrL6cCMzMCs6JwMys4JwIzMwKLtfB4kYxa2
EHU+YuZeWaTvq29GFo/9Z6h2Rm1jCavkUwa2EHk2cuomNNJwGsXb+BZ195m1kLK93bZmZWHE2fCKbMXUrnu+s3KdsQSbmZmRUgEaxc07lF5WZmRdP0iWBwW/nxgO7KzcyKpukTwaSxo2jt27JJWWvfFiaNHVWniMzMGkvTXzU0YXQ7wMarhga3tTJp7KiN5WZmRdf0iQCSZOAvfjOz8pq+a8jMzCpzIjAzKzgnAjOzgnMiMDMrOCcCM7OCU0TUO4YtImkV8Icqqw8AXskxnJ7Wm+LtTbFC74q3N8UKvSveIse6V0QMLLei1yWCLSFpQUSMqXcc1epN8famWKF3xdubYoXeFa9jLc9dQ2ZmBedEYGZWcM2eCKbVO4At1Jvi7U2xQu+KtzfFCr0rXsdaRlOPEZiZ2eY1e4vAzMw2w4nAzKzgmjYRSBonaamkZZLOrXc8lUh6TtIiSY9KWlDveEpJulrSy5KeyJT1l3SnpP9M/+5ezxi7dBPrBZI60vP7qKSj6xljlqShku6WtETSYklfS8sb7vxWiLXhzq+kfpLmSXosjfU7afkISQ+l3ws3Sdq+3rFCxXivkfRs5twemMvxm3GMQFIL8BRwJLACmA+cFBFL6hpYNyQ9B4yJiIa80UXSp4C3gOsiYv+07PvA6oi4JE20u0fEN+oZZxpXuVgvAN6KiB/UM7ZyJA0CBkXEI5J2AR4GJgCn0GDnt0KsJ9Bg51eSgJ0i4i1JfYHfAV8DzgZmRsR0SVcAj0XE5fWMFSrGezpwR0Tckufxm7VFcDCwLCKeiYi1wHRgfJ1j6rUi4jfA6pLi8cC16ftrSb4Q6q6bWBtWRLwQEY+k798EngTaacDzWyHWhhOJt9LFvukrgCOAri/VhjivUDHemmjWRNAOLM8sr6BB/8GmAvh3SQ9LmljvYKq0Z0S8kL5/EdiznsFU4QxJj6ddR3XvZilH0nBgNPAQDX5+S2KFBjy/klokPQq8DNwJPA2siYh1aZWG+l4ojTcius7txem5/ZGkHfI4drMmgt7m0Ij4KHAU8NW0e6PXiKR/sZH7GC8HPgAcCLwA/LC+4byXpJ2BnwP/HBFvZNc12vktE2tDnt+IWB8RBwJDSHoJPljnkCoqjVfS/sBkkrg/BvQHcukebNZE0AEMzSwPScsaUkR0pH9fBm4l+Ufb6F5K+4y7+o5frnM83YqIl9L/yTYAV9Jg5zftE/45cH1EzEyLG/L8lou10c9vRKwB7gYOAdokdT2ityG/FzLxjku74yIi3gF+Sk7ntlkTwXxgZHqFwPbAicDsOsdUlqSd0oE3JO0E/DXwROWtGsJs4Ivp+y8Ct9Uxloq6vlBTx9JA5zcdJPw34MmIuDSzquHOb3exNuL5lTRQUlv6vpXkwpEnSb5gj0+rNcR5hW7j/X3mx4BIxjNyObdNedUQQHoJ2/8FWoCrI+LiOodUlqT3k7QCALYDbmi0WCXdCBxGMi3uS8C3gVnADGAYybTgJ0RE3Qdpu4n1MJJuiwCeA07L9L/XlaRDgd8Ci4ANafE3SfreG+r8Voj1JBrs/Er6MMlgcAvJD94ZEXFh+v/bdJJuloXA36W/tuuqQrx3AQMBAY8Cp2cGlXvu+M2aCMzMrDrN2jVkZmZVciIwMys4JwIzs4JzIjAzKzgnAjOzgnMisIaS3kb/z5nluZKuyiz/UNLZFba/RtLx6ft7JL3n4d+S+kq6JJ3Z8xFJD0g6Kl33nKQBWxH3xuN2s35qOnvkEkmdmdkkj5c0p+sa8p4kaZCkOyqs317SbzI3WFlBORFYo7kP+ASApD4k9wPsl1n/CeD+bTzGRcAgYP90ao8JwC7buM+KIuKr6fQBRwNPR8SB6euWiDg6vZu0p51NcqdvdzGtBX4N/G0Ox7ZexInAGs39JFMBQJIAngDelLR7OuHWh4BHJJ0vab6kJyRNS++83CxJOwJfAc7supEonSJhRpm6Z6f7f6KklfI/0knAHpP0szLbXZS2EFqqjOk5SQMkDZf0+3TbpyRdL+kzku5LWy8Hp/V3Sid3mydpoaTuZtY9DvhVus1+af1H09hHpnVmAf+9mjiteblJaA0lIlZKWidpGMmv/wdIZog8BHgdWBQRayX9v4i4ECD9Mv4scHsVh9gbeL50YrdSkg4CTgU+TnJX50OS7gXWAucBn4iIVyT1L9luCknr4tTYurs19wa+AHyJZKqUk4FDgWNI7uKdAPwLcFdEfCntUpon6T8i4u1MHCOA1zJ3zZ4OXBYR16fTrnQlqSdIJjSzAnOLwBrR/SRJoCsRPJBZvi+tc7iSJ00tIpljfr9yO9oGhwK3RsTb6S39M4G/TI91c9dDhEqmffgWsFtEnL6VSQDg2YhYlE7gthj4dbqvRcDwtM5fA+cqmbL4HqAfyVQUWYOAVZnlB4BvSvoGsFdEdKbxrwfWds13ZcXkRGCNqGuc4ACSX6wPkrQIPgHcL6kf8BPg+Ig4gKQfvF+V+14GDJO0a49HnfyCP6i0lbCFsvPebMgsb+DPLXgBx2XGGYZFxJMl++kkc04i4gaSVkUnMEfSEZm6OwB/2oaYrZdzIrBGdD9JV8/qdHrj1UAbSTK4nz9/wb2iZG78bq/WKRURfySZQfOytIuka+bHL5RU/S0wQdKOSmaFPTYtuwv4gqT3pdtmv/R/BVwC/CLnX9hzgTO7xkUkjS5T5yn+3ILomtzwmYj4McmMmx9Oy98HvBIR7+YYrzU4JwJrRItIrhZ6sKTs9Yh4Jb3C5kqS1sJckl/iW+I8km6TJUoecn8HUPowmEeAa4B5JDOBXhURCyNiMXAxcK+kx4BLS7a7OY1ttpLphPNwEcmjDB+XtDhd3kQ6XvC0pL3TohOAJ9LupP2B69Lyw4Ff5BSn9RKefdSsSUk6FjgoIs6rUGcmcG5EPFW7yKzR+KohsyYVEbd2dWGVk3aNzXISMLcIzMwKzmMEZmYF50RgZlZwTgRmZgXnRGBmVnBOBGZmBfdf82rdfzULAWwAAAAASUVORK5CYII=",
"text/plain": [
""
- ],
- "image/svg+xml": "\n\n\n\n",
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYIAAAEWCAYAAABrDZDcAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjAsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8GearUAAAdJklEQVR4nO3dfZwcVZ3v8c83QyBBHgJmZCEPJCwhCgoJRhAfAXUTWAVckAX2tVfxIboLrCveIFFEFi73hZtdvPi6US9wEfXyHEMIGIkogisgSSBAEnAwPEgyAUkIQYwjSSa/+0dVh0rT09Mzmeqe7vq+X69+TdepU1W/A53+9TlVdUoRgZmZFdeQRgdgZmaN5URgZlZwTgRmZgXnRGBmVnBOBGZmBedEYGZWcE4EZlVIer+kjkbHYZYnJwIbtCQ9K+nDjYwhIv4rIibmtX9JUyX9StKrktZKulfSCXkdz6wSJwIrNEltDTz2KcAtwA+B0cA+wIXAx/qxL0nyv2frF39wrOlIGiLpfElPSXpJ0s2S9s6sv0XSC5JeSX9tH5JZd62k70paIGkjcEza8/jvkh5Lt7lJ0rC0/tGSVme277Fuuv48Sc9LWiPps5JC0oEV2iDgcuCSiLg6Il6JiK0RcW9EfC6tc5Gk/5fZZly6v53S5XskXSrpPuDPwAxJS8qO8yVJ89P3u0j6D0nPSfqDpO9JGr6D/zusBTgRWDM6BzgJ+CCwH/AyMDuz/qfABOAtwMPAdWXbnwFcCuwO/DotOxWYBowHDgU+VeX4FetKmgacC3wYOBA4uso+JgJjgDlV6tTiH4HpJG35HjBR0oTM+jOA69P3lwEHAZPS+EaR9ECs4JwIrBl9AfhaRKyOiNeAi4BTSr+UI+KaiHg1s+4wSXtmtr8tIu5Lf4H/JS37dkSsiYj1wO0kX5Y96anuqcD3I2JFRPw5PXZP3pz+fb7WRvfg2vR4WyLiFeA24HSANCG8FZif9kCmA1+KiPUR8SrwP4HTdvD41gKcCKwZ7Q/cKmmDpA3AE0A3sI+kNkmXpcNGfwSeTbcZmdl+VYV9vpB5/2dgtyrH76nufmX7rnSckpfSv/tWqVOL8mNcT5oISHoD89Kk1A7sCjyU+e92Z1puBedEYM1oFXBcRIzIvIZFRCfJl9+JJMMzewLj0m2U2T6vKXefJznpWzKmSt0OknacXKXORpIv75K/qlCnvC13Ae2SJpEkhNKw0DqgCzgk899sz4iolvCsIJwIbLAbKmlY5rUTyVj4pZL2B5DULunEtP7uwGskv7h3JRn+qJebgTMlvU3SrsDXe6oYyfzv5wJfl3SmpD3Sk+Dvk3RlWu0R4AOSxqZDWzN7CyAiNpNciTQL2JskMRARW4GrgG9JeguApFGSpva7tdYynAhssFtA8ku29LoIuAKYD/xM0qvAb4Aj0/o/BH4PdAKPp+vqIiJ+Cnwb+CWwMnPs13qoPwf4e+DTwBrgD8D/IBnnJyLuAm4CHgMeAu6oMZTrSXpEt0TElkz5V0pxpcNmPyc5aW0FJz+Yxiwfkt4GLAd2KftCNhtU3CMwG0CSPp5er78X8E3gdicBG+ycCMwG1ueBF4GnSK5k+qfGhmPWOw8NmZkVnHsEZmYFt1OjA+irkSNHxrhx4xodhplZU3nooYfWRUTFGwibLhGMGzeOJUuW9F7RzMy2kfT7ntZ5aMjMrOCcCMzMCs6JwMys4JwIzMwKzonAzKzgmu6qITOzopm3tJNZCztYs6GL/UYMZ8bUiZw0edSA7d+JwMxsEJu3tJOZc5fRtbkbgM4NXcycuwxgwJKBh4bMzAaxWQs7tiWBkq7N3cxa2DFgx3CPwLaTdxd0sB7bbLBas6GrT+X94URg29SjCzoYj202mO03YjidFb709xsxfMCO4URg2/TUBT1vzmPcsOi5XI+99LkNbOre2pBjmw1mw4YOYYhga2ai6OFD25gxdeAeLudEUMGODFE08/BGT13N8i/oPPR0jHoc22wwG7nbLgCsWt/Fpu6tjPJVQ/nbkSGKZh/e6KkLOmrEcG76/FG5Hvu9l93dsGObFZ0TQZkdGR5p9uGNenRBezJj6sTtkmg9j21WdE4EZXZkeKTZhzfq0QXtSekYzTqsZtbMnAjK7MjwiIc3dsxJk0f5i9+sAXxDWZkZUycyfGjbdmW1DlHsyLZmZo3iHkGZ0i/S8+Y81ufhEQ9vmFkzyjURSJoGXAG0AVdHxGVl68cCPwBGpHXOj4gFecZUi5Mmj9p2crevQzoe3jCzZpPb0JCkNmA2cBxwMHC6pIPLql0A3BwRk4HTgO/kFY+ZmVWW5zmCI4CVEfF0RGwCbgROLKsTwB7p+z2BNTnGY2ZmFeQ5NDQKWJVZXg0cWVbnIuBnks4B3gR8OMd4zMysgkZfNXQ6cG1EjAaOB34k6Q0xSZouaYmkJWvXrq17kGZmrSzPRNAJjMksj07Lsj4D3AwQEQ8Aw4CR5TuKiCsjYkpETGlvb88pXDOzYsozESwGJkgaL2lnkpPB88vqPAd8CEDS20gSgX/ym5nVUW6JICK2AGcDC4EnSK4OWiHpYkknpNW+DHxO0qPADcCnIiIq79HMzPKQ630E6T0BC8rKLsy8fxx4b54xmJlZdY0+WWxmZg3mRGBmVnBOBGZmBedEYGZWcE4EZmYF50RgZlZwfh4ByUPny58hYGZWFIXvEcxb2snMucvo3NBFAJ0bupg5dxnr/vRao0MzM6uLwieCWQs76NrcvV1Z1+Zunl67sUERmZnVV+ETwZoKD5uH5EEJJ07yk8bMrPUVPhHsN2J4xfJRI4ZzxpFj6xyNmVn9FT4RzJg6keFD27YrGz60zSeMzawwCn/VUOlB8+fNeYxN3VsZlV415AfQm1lRFD4RQJIMblj0HAA3ff6oBkdjZlZfhR8aMjMrOicCM7OCK+zQUPndxMOGDmHkbrs0Oiwzs7orZCIo3U1cupGsc0MXQ9TgoMzMGqSQQ0OV7ibeGrBqfeWby8zMWlkhE0FPdxNv6t5a50jMzBqvkImg2t3EZmZFU8hE4LuJzcxeV8iTxb6b2MzsdYVMBOC7ic3MSgo5NGRmZq9zIjAzK7hcE4GkaZI6JK2UdH6F9d+S9Ej6elLShjzjMTOzN8rtHIGkNmA28BFgNbBY0vyIeLxUJyK+lKl/DjA5r3jMzKyyPHsERwArI+LpiNgE3AicWKX+6cANOcZjZmYV5HnV0ChgVWZ5NXBkpYqS9gfGA3f3sH46MB1g7Ngde3xkdrK5oW1DGLO3byIzs2IbLCeLTwPmRER3pZURcWVETImIKe3t7f0+SGmyuc4NXQTJlBLPrNvIvKWd/d6nmVmzyzMRdAJjMsuj07JKTqMOw0I9TTY3a2FH3oc2Mxu08kwEi4EJksZL2pnky35+eSVJbwX2Ah7IMRag58nmeio3MyuC3BJBRGwBzgYWAk8AN0fECkkXSzohU/U04MaIiLxiKelpsrmeys3MiiDXKSYiYgGwoKzswrLli/KMIWvG1InbPZAGPNm
cmVmh5hryZHNmZm9UqEQAnmzOzKzcYLl81MzMGsSJwMys4JwIzMwKzonAzKzgnAjMzArOicDMrOCcCMzMCs6JwMys4JwIzMwKzonAzKzgnAjMzArOicDMrOCcCMzMCs6JwMys4KomAkl7SPrrCuWH5heSmZnVU4+JQNKpwG+BH0taIeldmdXX5h2YmZnVR7UewVeBd0bEJOBM4EeSPp6uU+6RmZlZXVR7QllbRDwPEBGLJB0D3CFpDJD7g+bNzKw+qvUIXs2eH0iTwtHAicAhOcdlZmZ1Uq1H8E+UDQFFxKuSpgGn5hqVmZnVTY89goh4FHhG0i/LyjdHxHW5R2ZmZnVR9fLRiOgGtkras07xmJlZnVUbGir5E7BM0l3AxlJhRPxLblGZmVnd1JII5qavPkvPJ1wBtAFXR8RlFeqcClxEciXSoxFxRn+OZWZm/dNrIoiIH/Rnx5LagNnAR4DVwGJJ8yPi8UydCcBM4L0R8bKkt/TnWGZm1n95zjV0BLAyIp6OiE3AjSSXnmZ9DpgdES8DRMSLOcZjZmYV5JkIRgGrMsur07Ksg4CDJN0n6TfpUNIbSJouaYmkJWvXrs0pXDOzYmr07KM7ARNIblQ7HbhK0ojyShFxZURMiYgp7e3tdQ7RzKy19XqOQNJBwAxg/2z9iDi2l007gTGZ5dFpWdZq4MGI2Exyz8KTJIlhce+hm5nZQKjlqqFbgO8BVwHdfdj3YmCCpPEkCeA0oPyKoHkkPYHvSxpJMlT0dB+OYWZmO6iWRLAlIr7b1x1HxBZJZwMLSS4fvSYiVki6GFgSEfPTdX8j6XGSJDMjIl7q67HMzKz/akkEt0v6Z+BW4LVSYUSs723DiFgALCgruzDzPoBz05eZmTVALYngk+nfGZmyAA4Y+HDMzKzearmhbHw9AjEzs8ao5aqhoSRTUn8gLboH+D/plT5mZtbkahka+i4wFPhOuvyPadln8wrKzMzqp5ZE8K6IOCyzfLekR/MKyMzM6quWO4u7s4+slHQAfbufwMzMBrFaegQzgF9Keprk0ZX7A2fmGpWZmdVNLVcN/SKdLnpiWtQREa9V28bMzJpHj4lA0rERcbekvytbdaAkIqJfD6sxM7PBpVqP4IPA3cDHKqwL+vnUMjMzG1x6TAQR8Y307cUR8Ux2XTqRnJmZtYBarhr6cYWyOQMdiJmZNUa1cwRvBQ4B9iw7T7AHMCzvwMzMrD6qnSOYCHwUGMH25wleJXnWsJmZtYBq5whuA26TdFREPFDHmMzMrI5quaFsqaSzSIaJtg0JRcSnc4vKzMzqppaTxT8C/gqYCtxL8uzhV/MMyszM6qeWRHBgRHwd2BgRPwD+Fjgy37DMzKxeakkEpecObJD0dmBP4C35hWRmZvVUyzmCKyXtBXwdmA/sBlxYfRMzM2sWtUw6d3X69l78nGIzs5ZT7Yayc6ttGBGXD3w4ZmZWb9V6BLunfycC7yIZFoLk5rJFeQZlZmb1U+2Gsn8DkPQr4PCIeDVdvgj4SV2iMzOz3NVy1dA+wKbM8qa0zMzMWkAtieCHwCJJF6W9gQeBa2vZuaRpkjokrZR0foX1n5K0VtIj6euzfQnezMx2XC1XDV0q6afA+9OiMyNiaW/bSWoDZgMfAVYDiyXNj4jHy6reFBFn9zFuMzMbINWuGtojIv4oaW/g2fRVWrd3RKzvZd9HACsj4ul0mxuBE4HyRGBmZg1UrUdwPck01A+RPJqyROlyb/cUjAJWZZZXU3lqipMlfQB4EvhSRKwqryBpOjAdYOzYsb0c1szM+qLHcwQR8dH07/iIOCDzGh8RA3Vj2e3AuIg4FLgL+EEPsVwZEVMiYkp7e/sAHdrMzKD60NDh1TaMiId72XcnMCazPDoty+7jpczi1cC/97JPMzMbYNWGhv6zyroAju1l34uBCemD7juB04AzshUk7RsRz6eLJwBP9LJPMzMbYNVuKDtmR3YcEVsknQ0sBNqAayJihaSLgSURMR/4F0knAFuA9cCnduSYZmbWd7XMPko6/fTBbP+Esh/2tl1ELAAWlJVdmHk/E5hZa7BmZjbwek0Ekr4BHE2SCBYAxwG/JrnRzMzMmlwtdxafAnwIeCEizgQOI3k4jZmZtYBaEkFXRGwFtkjaA3iR7a8GMjOzJlbLOYIlkkYAV5HcXPYn4IFcozIzs7qpdh/BbOD6iPjntOh7ku4E9oiIx+oSnZmZ5a5aj+BJ4D8k7QvcDNxQy2RzZmbWXKpNMXFFRBwFfBB4CbhG0m8lfUPSQXWL0MzMctXryeKI+H1EfDMiJgOnAyfhO4DNzFpGr4lA0k6SPibpOuCnQAfwd7lHZmZmdVHtZPFHSHoAx5M8rP5GYHpEbKxTbANu3tJOZi3soHNDFzu3DWHe0k5Omjyq0WGZmTVUtZPFM0meSfDliHi5TvHkZt7STmbOXUbX5m4ANnVvZebcZQBOBmZWaNVOFh8bEVe3QhIAmLWwY1sSKOna3M2shR0NisjMbHCo5c7ilrBmQ1efys3MiqIwiWC/EcP7VG5mVhSFSQQzpk5k+NC27cqGD21jxtSJDYrIzGxwqOl5BK2gdEL4vDmPsal7K6NGDGfG1Ik+UWxmhVeYRABJMrhh0XMA3PT5oxocjZnZ4FCYoSEzM6vMicDMrOCcCMzMCs6JwMys4JwIzMwKzonAzKzgnAjMzArOicDMrOByTQSSpknqkLRS0vlV6p0sKSRNyTMeMzN7o9wSgaQ2YDZwHHAwcLqkgyvU2x34IvBgXrGYmVnP8uwRHAGsjIinI2ITyRPOTqxQ7xLgm8BfcozFzMx6kGciGAWsyiyvTsu2kXQ4MCYifpJjHGZmVkXDThZLGgJcDny5hrrTJS2RtGTt2rX5B2dmViB5JoJOYExmeXRaVrI78HbgHknPAu8G5lc6YRwRV0bElIiY0t7enmPIZmbFk2ciWAxMkDRe0s7AacD80sqIeCUiRkbEuIgYB/wGOCEiluQYk5mZlcktEUTEFuBsYCHwBHBzRKyQdLGkE/I6rpmZ9U2uD6aJiAXAgrKyC3uoe3SesZiZWWW+s9jMrOCcCMzMCs6JwMys4JwIzMwKzonAzKzgnAjMzArOicDMrOCcCMzMCs6JwMys4JwIzMwKzonAzKzgnAjMzArOicDMrOCcCMzMCs6JwMys4JwIzMwKzonAzKzgnAjMzArOicDMrOCcCMzMCs6JwMys4JwIzMwKzonAzKzgnAjMzArOicDMrOCcCMzMCi7XRCBpmqQOSSslnV9h/RckLZP0iKRfSzo4z3jMzOyNcksEktqA2cBxwMHA6RW+6K+PiHdExCTg34HL84rHzMwqy7NHcASwMiKejohNwI3AidkKEfHHzOKbgMgxHjMzq2CnHPc9CliVWV4NHFleSdJZwLnAzsCxlXYkaTowHWDs2LEDHqiZWZE1/GRxRMyOiL8GvgJc0EOdKyNiSkRMaW9vr2+AZmYtLs9E0AmMySyPTst6ciNwUo7xmJlZBXkmgsXABEnjJe0MnAbMz1aQNCGz+LfA73KMx8zMKsjtHEFEbJF0Nr
AQaAOuiYgVki4GlkTEfOBsSR8GNgMvA5/MKx4zM6ssz5PFRMQCYEFZ2YWZ91/M8/hmZta7hp8sNjOzxnIiMDMrOCcCM7OCcyIwMyu4XE8WDxbzlnYya2EHazZ0MbRtCGP2Ht7okMzMBo2W7xHMW9rJzLnL6NzQRQCburfyzLqNzFta7d42M7PiaPlEMGthB12bu7cr2xpJuZmZFSARrNnQ1adyM7OiaflEsN+IyucDeio3Myualk8EM6ZOZPjQtu3Khg9tY8bUiQ2KyMxscGn5q4ZOmjwKYNtVQ/uNGM6MqRO3lZuZFV3LJwJIkoG/+M3MKmv5oSEzM6vOicDMrOCcCMzMCs6JwMys4JwIzMwKThHR6Bj6RNJa4Pd93GwksC6HcBqhldoCrdWeVmoLtFZ7Wqkt0L/27B8R7ZVWNF0i6A9JSyJiSqPjGAit1BZorfa0UlugtdrTSm2BgW+Ph4bMzArOicDMrOCKkgiubHQAA6iV2gKt1Z5Wagu0VntaqS0wwO0pxDkCMzPrWVF6BGZm1gMnAjOzgmvpRCBpmqQOSSslnd/oePpK0jWSXpS0PFO2t6S7JP0u/btXI2OslaQxkn4p6XFJKyR9MS1v1vYMk7RI0qNpe/4tLR8v6cH0M3eTpJ0bHWutJLVJWirpjnS5mdvyrKRlkh6RtCQta9bP2ghJcyT9VtITko4a6La0bCKQ1AbMBo4DDgZOl3RwY6Pqs2uBaWVl5wO/iIgJwC/S5WawBfhyRBwMvBs4K/3/0azteQ04NiIOAyYB0yS9G/gm8K2IOBB4GfhMA2Psqy8CT2SWm7ktAMdExKTM9fbN+lm7ArgzIt4KHEby/2hg2xIRLfkCjgIWZpZnAjMbHVc/2jEOWJ5Z7gD2Td/vC3Q0OsZ+tus24COt0B5gV+Bh4EiSuz13Ssu3+wwO5hcwOv1CORa4A1CztiWN91lgZFlZ033WgD2BZ0gv7MmrLS3bIwBGAasyy6vTsma3T0Q8n75/AdinkcH0h6RxwGTgQZq4PelQyiPAi8BdwFPAhojYklZpps/c/wLOA7amy2+medsCEMDPJD0kaXpa1oyftfHAWuD76bDd1ZLexAC3pZUTQcuL5OdAU13/K2k34MfAv0bEH7Prmq09EdEdEZNIfk0fAby1wSH1i6SPAi9GxEONjmUAvS8iDicZGj5L0geyK5vos7YTcDjw3YiYDGykbBhoINrSyomgExiTWR6dljW7P0jaFyD9+2KD46mZpKEkSeC6iJibFjdte0oiYgPwS5LhkxGSSo+AbZbP3HuBEyQ9C9xIMjx0Bc3ZFgAiojP9+yJwK0mibsbP2mpgdUQ8mC7PIUkMA9qWVk4Ei4EJ6ZUPOwOnAfMbHNNAmA98Mn3/SZKx9kFPkoD/CzwREZdnVjVre9oljUjfDyc53/EESUI4Ja3WFO2JiJkRMToixpH8O7k7Iv6BJmwLgKQ3Sdq99B74G2A5TfhZi4gXgFWSJqZFHwIeZ6Db0uiTITmfaDkeeJJk7PZrjY6nH/HfADwPbCb5ZfAZkrHbXwC/A34O7N3oOGtsy/tIuq+PAY+kr+ObuD2HAkvT9iwHLkzLDwAWASuBW4BdGh1rH9t1NHBHM7cljfvR9LWi9G+/iT9rk4Al6WdtHrDXQLfFU0yYmRVcKw8NmZlZDZwIzMwKzonAzKzgnAjMzArOicDMrOCcCGxQkfQtSf+aWV4o6erM8n9KOrfK9tdKOiV9f4+kNzzgW9JQSZelMzc+LOkBScel656VNLIfcW87bg/rZ6czYT4uqSt9/4ikUyQtKN2TMJAk7VuaSbSH9TtL+lXmpjErKCcCG2zuA94DIGkIMBI4JLP+PcD9O3iMS0gm6np7JNMQnATsvoP7rCoizopkOorjgacimRVzUkTMiYjjI7k7eaCdC1xVJaZNJNei/30Ox7Ym4kRgg839JFM1QJIAlgOvStpL0i7A24CHJV0oabGk5ZKuTO9c7pWkXYHPAedExGsAEfGHiLi5Qt1z0/0vL+ul/DdJj6XPIvhRhe0uSXsIbTXG9KykkZLGpXPOXyvpSUnXSfqwpPvS3ssRaf03KXlWxaJ0IrITe9j1ycCd6TaHpPUfSWOfkNaZB/xDLXFa63KX0AaViFgjaYuksSS//h8gmfXyKOAVYFlEbJL0vyPiYoD0y/ijwO01HOJA4Lkom/CunKR3AmeSTC0t4EFJ9wKbgAuA90TEOkl7l203i6R3cWb0727NA4FPAJ8mmSblDJK7sk8AvkrSe/kayTQQn06HlBZJ+nlEbMzEMR54uZTsgC8AV0TEdemUK6UktRx4Vz/itBbiHoENRveTJIFSInggs3xfWucYJU/PWkYySdohlXa0A94H3BoRGyPiT8Bc4P3psW6JiHUAEbE+s83XgT0j4gv9TAIAz0TEsojYSjI9wi/SfS0jeTYFJHPnnJ9OgX0PMAwYW7affUmmLy55APiqpK8A+0dEVxp/N7CpNDePFZMTgQ1GpfME7yD5xfobkh7Be4D7JQ0DvgOcEhHvIBkHH1bjvlcCYyXtMeBRJ7/g31neS+ij1zLvt2aWt/J6D17AyZnzDGMjIvtkMYAuMv9NIuJ6kl5FF7BA0rGZursAf9mBmK3JORHYYHQ/yVDP+kjm/F8PjCBJBvfz+hfcuvT5Bj1erVMuIv5MMgvqFekQSWkm0U+UVf0v4CRJu6YzWH48Lbsb+ISkN6fbZr/07wQuA36S8y/shcA5pfMikiZXqPMkr/cgkHQA8HREfJtkpspD0/I3A+siYnOO8dog50Rgg9EykquFflNW9kpErEuvsLmKpLewkOSXeF9cQDJs8rik5SSPZix/SM7DJM+MXkTyJLWrI2JpRKwALgXulfQocHnZdreksc1Pp6fOwyXAUOAxSSvS5e2k5wueknRgWnQqsDwdTno78MO0/BjgJznFaU3Cs4+atShJHwfeGREXVKkzFzg/Ip6sX2Q22PiqIbMWFRG3loawKkmHxuY5CZh7BGZmBedzBGZmBedEYGZWcE4EZmYF50RgZlZwTgRmZgX3/wHbnpzjbITbNQAAAABJRU5ErkJggg=="
+ ]
},
"metadata": {
"needs_background": "light"
@@ -523,14 +483,14 @@
"cell_type": "code",
"execution_count": 13,
"source": [
- "print('flaml r2', '=', 1 - sklearn_metric_loss_score('r2', y_pred, y_test))"
+ "print('flaml (4min) r2', '=', 1 - sklearn_metric_loss_score('r2', y_pred, y_test))"
],
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
- "flaml r2 = 0.8540590968156087\n"
+ "flaml (4min) r2 = 0.8505434326525669\n"
]
}
],
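
For readers skimming the hunk above: `sklearn_metric_loss_score` is flaml's metric helper and it returns a *loss*, so the r2 score is recovered as `1 - loss`. A minimal sketch, assuming `y_pred` and `y_test` come from the earlier notebook cells:

```python
# Sketch of the metric call updated in this hunk. flaml's helper returns a
# loss for the named metric ('r2' -> 1 - r2), hence the subtraction from 1.
from flaml.ml import sklearn_metric_loss_score

r2 = 1 - sklearn_metric_loss_score('r2', y_pred, y_test)
print('flaml (4min) r2', '=', r2)
```
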
@@ -625,7 +585,7 @@
" \"objective\": \"regression\",\n",
" \"metric\": \"regression\",\n",
" \"verbosity\": -1,\n",
- "}\n"
+ "}"
],
"outputs": [],
"metadata": {}
@@ -642,89 +602,89 @@
"output_type": "stream",
"name": "stderr",
"text": [
- "\u001b[32m[I 2021-08-22 21:13:20,495]\u001b[0m A new study created in memory with name: no-name-11015170-733e-470d-817a-413f55382d0c\u001b[0m\n",
- "feature_fraction, val_score: 1923826918.442117: 14%|#4 | 1/7 [00:01<00:08, 1.49s/it]\u001b[32m[I 2021-08-22 21:13:22,006]\u001b[0m Trial 0 finished with value: 1923826918.4421172 and parameters: {'feature_fraction': 0.8999999999999999}. Best is trial 0 with value: 1923826918.4421172.\u001b[0m\n",
- "feature_fraction, val_score: 1923826918.442117: 29%|##8 | 2/7 [00:02<00:07, 1.44s/it]\u001b[32m[I 2021-08-22 21:13:23,335]\u001b[0m Trial 1 finished with value: 1935542284.5841475 and parameters: {'feature_fraction': 0.6}. Best is trial 0 with value: 1923826918.4421172.\u001b[0m\n",
- "feature_fraction, val_score: 1923826918.442117: 43%|####2 | 3/7 [00:04<00:05, 1.44s/it]\u001b[32m[I 2021-08-22 21:13:24,762]\u001b[0m Trial 2 finished with value: 1959693958.979498 and parameters: {'feature_fraction': 0.7}. Best is trial 0 with value: 1923826918.4421172.\u001b[0m\n",
- "feature_fraction, val_score: 1923826918.442117: 57%|#####7 | 4/7 [00:05<00:04, 1.38s/it]\u001b[32m[I 2021-08-22 21:13:26,021]\u001b[0m Trial 3 finished with value: 2237193094.1989536 and parameters: {'feature_fraction': 0.4}. Best is trial 0 with value: 1923826918.4421172.\u001b[0m\n",
- "feature_fraction, val_score: 1923826918.442117: 71%|#######1 | 5/7 [00:06<00:02, 1.36s/it]\u001b[32m[I 2021-08-22 21:13:27,314]\u001b[0m Trial 4 finished with value: 1988198059.953293 and parameters: {'feature_fraction': 0.5}. Best is trial 0 with value: 1923826918.4421172.\u001b[0m\n",
- "feature_fraction, val_score: 1923826918.442117: 86%|########5 | 6/7 [00:08<00:01, 1.37s/it]\u001b[32m[I 2021-08-22 21:13:28,719]\u001b[0m Trial 5 finished with value: 1959693958.979498 and parameters: {'feature_fraction': 0.8}. Best is trial 0 with value: 1923826918.4421172.\u001b[0m\n",
- "feature_fraction, val_score: 1923826918.442117: 100%|##########| 7/7 [00:09<00:00, 1.42s/it]\u001b[32m[I 2021-08-22 21:13:30,240]\u001b[0m Trial 6 finished with value: 1961388150.442425 and parameters: {'feature_fraction': 1.0}. Best is trial 0 with value: 1923826918.4421172.\u001b[0m\n",
- "feature_fraction, val_score: 1923826918.442117: 100%|##########| 7/7 [00:09<00:00, 1.39s/it]\n",
- "num_leaves, val_score: 1902337773.833954: 5%|5 | 1/20 [00:02<00:41, 2.21s/it]\u001b[32m[I 2021-08-22 21:13:32,454]\u001b[0m Trial 7 finished with value: 1902337773.8339543 and parameters: {'num_leaves': 62}. Best is trial 7 with value: 1902337773.8339543.\u001b[0m\n",
- "num_leaves, val_score: 1892120308.436302: 10%|# | 2/20 [00:04<00:42, 2.35s/it]\u001b[32m[I 2021-08-22 21:13:35,126]\u001b[0m Trial 8 finished with value: 1892120308.4363017 and parameters: {'num_leaves': 78}. Best is trial 8 with value: 1892120308.4363017.\u001b[0m\n",
- "num_leaves, val_score: 1892120308.436302: 15%|#5 | 3/20 [00:06<00:34, 2.03s/it]\u001b[32m[I 2021-08-22 21:13:36,422]\u001b[0m Trial 9 finished with value: 1924633212.447515 and parameters: {'num_leaves': 26}. Best is trial 8 with value: 1892120308.4363017.\u001b[0m\n",
- "num_leaves, val_score: 1892120308.436302: 20%|## | 4/20 [00:07<00:27, 1.70s/it]\u001b[32m[I 2021-08-22 21:13:37,340]\u001b[0m Trial 10 finished with value: 1975840134.2036633 and parameters: {'num_leaves': 12}. Best is trial 8 with value: 1892120308.4363017.\u001b[0m\n",
- "num_leaves, val_score: 1892120308.436302: 25%|##5 | 5/20 [00:13<00:46, 3.12s/it]\u001b[32m[I 2021-08-22 21:13:43,773]\u001b[0m Trial 11 finished with value: 1923702276.2852578 and parameters: {'num_leaves': 204}. Best is trial 8 with value: 1892120308.4363017.\u001b[0m\n",
- "num_leaves, val_score: 1892120308.436302: 30%|### | 6/20 [00:20<00:59, 4.24s/it]\u001b[32m[I 2021-08-22 21:13:50,646]\u001b[0m Trial 12 finished with value: 1939984702.0007648 and parameters: {'num_leaves': 214}. Best is trial 8 with value: 1892120308.4363017.\u001b[0m\n",
- "num_leaves, val_score: 1892120308.436302: 35%|###5 | 7/20 [00:23<00:49, 3.79s/it]\u001b[32m[I 2021-08-22 21:13:53,368]\u001b[0m Trial 13 finished with value: 1942261187.1568937 and parameters: {'num_leaves': 61}. Best is trial 8 with value: 1892120308.4363017.\u001b[0m\n",
- "num_leaves, val_score: 1892120308.436302: 40%|#### | 8/20 [00:33<01:09, 5.81s/it]\u001b[32m[I 2021-08-22 21:14:03,909]\u001b[0m Trial 14 finished with value: 1962322296.1656826 and parameters: {'num_leaves': 234}. Best is trial 8 with value: 1892120308.4363017.\u001b[0m\n",
- "num_leaves, val_score: 1892120308.436302: 45%|####5 | 9/20 [00:38<01:00, 5.52s/it]\u001b[32m[I 2021-08-22 21:14:08,728]\u001b[0m Trial 15 finished with value: 1933575055.0360022 and parameters: {'num_leaves': 131}. Best is trial 8 with value: 1892120308.4363017.\u001b[0m\n",
- "num_leaves, val_score: 1892120308.436302: 50%|##### | 10/20 [00:41<00:47, 4.70s/it]\u001b[32m[I 2021-08-22 21:14:11,527]\u001b[0m Trial 16 finished with value: 1907396468.702243 and parameters: {'num_leaves': 64}. Best is trial 8 with value: 1892120308.4363017.\u001b[0m\n",
- "num_leaves, val_score: 1880656950.656438: 55%|#####5 | 11/20 [00:46<00:43, 4.83s/it]\u001b[32m[I 2021-08-22 21:14:16,641]\u001b[0m Trial 17 finished with value: 1880656950.6564376 and parameters: {'num_leaves': 141}. Best is trial 17 with value: 1880656950.6564376.\u001b[0m\n",
- "num_leaves, val_score: 1880656950.656438: 60%|###### | 12/20 [00:51<00:38, 4.80s/it]\u001b[32m[I 2021-08-22 21:14:21,399]\u001b[0m Trial 18 finished with value: 1906428309.75546 and parameters: {'num_leaves': 139}. Best is trial 17 with value: 1880656950.6564376.\u001b[0m\n",
- "num_leaves, val_score: 1880656950.656438: 65%|######5 | 13/20 [00:56<00:35, 5.07s/it]\u001b[32m[I 2021-08-22 21:14:27,074]\u001b[0m Trial 19 finished with value: 1897071192.2731016 and parameters: {'num_leaves': 161}. Best is trial 17 with value: 1880656950.6564376.\u001b[0m\n",
- "num_leaves, val_score: 1880656950.656438: 70%|####### | 14/20 [01:01<00:28, 4.81s/it]\u001b[32m[I 2021-08-22 21:14:31,276]\u001b[0m Trial 20 finished with value: 1910775598.9420693 and parameters: {'num_leaves': 95}. Best is trial 17 with value: 1880656950.6564376.\u001b[0m\n",
- "num_leaves, val_score: 1880656950.656438: 75%|#######5 | 15/20 [01:04<00:22, 4.44s/it]\u001b[32m[I 2021-08-22 21:14:34,857]\u001b[0m Trial 21 finished with value: 1890350018.7742429 and parameters: {'num_leaves': 101}. Best is trial 17 with value: 1880656950.6564376.\u001b[0m\n",
- "num_leaves, val_score: 1874647481.354196: 80%|######## | 16/20 [01:10<00:19, 4.84s/it]\u001b[32m[I 2021-08-22 21:14:40,645]\u001b[0m Trial 22 finished with value: 1874647481.354196 and parameters: {'num_leaves': 174}. Best is trial 22 with value: 1874647481.354196.\u001b[0m\n",
- "num_leaves, val_score: 1874647481.354196: 85%|########5 | 17/20 [01:16<00:15, 5.12s/it]\u001b[32m[I 2021-08-22 21:14:46,424]\u001b[0m Trial 23 finished with value: 1929626032.4915411 and parameters: {'num_leaves': 176}. Best is trial 22 with value: 1874647481.354196.\u001b[0m\n",
- "num_leaves, val_score: 1874647481.354196: 90%|######### | 18/20 [01:22<00:10, 5.35s/it]\u001b[32m[I 2021-08-22 21:14:52,288]\u001b[0m Trial 24 finished with value: 1926786945.429698 and parameters: {'num_leaves': 177}. Best is trial 22 with value: 1874647481.354196.\u001b[0m\n",
- "num_leaves, val_score: 1874647481.354196: 95%|#########5| 19/20 [01:30<00:06, 6.26s/it]\u001b[32m[I 2021-08-22 21:15:00,672]\u001b[0m Trial 25 finished with value: 1936436149.7610657 and parameters: {'num_leaves': 248}. Best is trial 22 with value: 1874647481.354196.\u001b[0m\n",
- "num_leaves, val_score: 1870787631.458499: 100%|##########| 20/20 [01:35<00:00, 5.93s/it]\u001b[32m[I 2021-08-22 21:15:05,849]\u001b[0m Trial 26 finished with value: 1870787631.4584987 and parameters: {'num_leaves': 152}. Best is trial 26 with value: 1870787631.4584987.\u001b[0m\n",
- "num_leaves, val_score: 1870787631.458499: 100%|##########| 20/20 [01:35<00:00, 4.78s/it]\n",
- "bagging, val_score: 1870787631.458499: 10%|# | 1/10 [00:07<01:06, 7.43s/it]\u001b[32m[I 2021-08-22 21:15:13,289]\u001b[0m Trial 27 finished with value: 2237757312.870728 and parameters: {'bagging_fraction': 0.44000087334449334, 'bagging_freq': 6}. Best is trial 27 with value: 2237757312.870728.\u001b[0m\n",
- "bagging, val_score: 1870787631.458499: 20%|## | 2/10 [00:15<01:01, 7.71s/it]\u001b[32m[I 2021-08-22 21:15:21,655]\u001b[0m Trial 28 finished with value: 2162729069.0272393 and parameters: {'bagging_fraction': 0.5075440331178458, 'bagging_freq': 3}. Best is trial 28 with value: 2162729069.0272393.\u001b[0m\n",
- "bagging, val_score: 1870787631.458499: 30%|### | 3/10 [00:21<00:49, 7.05s/it]\u001b[32m[I 2021-08-22 21:15:27,150]\u001b[0m Trial 29 finished with value: 2003355452.8831115 and parameters: {'bagging_fraction': 0.757776235401641, 'bagging_freq': 1}. Best is trial 29 with value: 2003355452.8831115.\u001b[0m\n",
- "bagging, val_score: 1870787631.458499: 40%|#### | 4/10 [00:28<00:42, 7.02s/it]\u001b[32m[I 2021-08-22 21:15:34,105]\u001b[0m Trial 30 finished with value: 2169017536.089679 and parameters: {'bagging_fraction': 0.5470758964212703, 'bagging_freq': 4}. Best is trial 29 with value: 2003355452.8831115.\u001b[0m\n",
- "bagging, val_score: 1870787631.458499: 50%|##### | 5/10 [00:33<00:32, 6.49s/it]\u001b[32m[I 2021-08-22 21:15:39,358]\u001b[0m Trial 31 finished with value: 1949886129.0973551 and parameters: {'bagging_fraction': 0.7729694462744219, 'bagging_freq': 1}. Best is trial 31 with value: 1949886129.0973551.\u001b[0m\n",
- "bagging, val_score: 1870787631.458499: 60%|###### | 6/10 [00:40<00:26, 6.53s/it]\u001b[32m[I 2021-08-22 21:15:45,996]\u001b[0m Trial 32 finished with value: 2082597134.5380604 and parameters: {'bagging_fraction': 0.6293524485160634, 'bagging_freq': 6}. Best is trial 31 with value: 1949886129.0973551.\u001b[0m\n",
- "bagging, val_score: 1870787631.458499: 70%|####### | 7/10 [00:46<00:19, 6.57s/it]\u001b[32m[I 2021-08-22 21:15:52,653]\u001b[0m Trial 33 finished with value: 2128522268.181099 and parameters: {'bagging_fraction': 0.5194460357854906, 'bagging_freq': 4}. Best is trial 31 with value: 1949886129.0973551.\u001b[0m\n",
- "bagging, val_score: 1870787631.458499: 80%|######## | 8/10 [00:53<00:12, 6.49s/it]\u001b[32m[I 2021-08-22 21:15:58,939]\u001b[0m Trial 34 finished with value: 1972329936.356194 and parameters: {'bagging_fraction': 0.7021495661140726, 'bagging_freq': 2}. Best is trial 31 with value: 1949886129.0973551.\u001b[0m\n",
- "bagging, val_score: 1870787631.458499: 90%|######### | 9/10 [00:58<00:06, 6.15s/it]\u001b[32m[I 2021-08-22 21:16:04,316]\u001b[0m Trial 35 finished with value: 2035847515.436036 and parameters: {'bagging_fraction': 0.7365019160691924, 'bagging_freq': 1}. Best is trial 31 with value: 1949886129.0973551.\u001b[0m\n",
- "bagging, val_score: 1870787631.458499: 100%|##########| 10/10 [01:05<00:00, 6.27s/it]\u001b[32m[I 2021-08-22 21:16:10,869]\u001b[0m Trial 36 finished with value: 2089685881.7609503 and parameters: {'bagging_fraction': 0.5702856203071842, 'bagging_freq': 6}. Best is trial 31 with value: 1949886129.0973551.\u001b[0m\n",
- "bagging, val_score: 1870787631.458499: 100%|##########| 10/10 [01:05<00:00, 6.50s/it]\n",
- "feature_fraction_stage2, val_score: 1870787631.458499: 17%|#6 | 1/6 [00:05<00:25, 5.10s/it]\u001b[32m[I 2021-08-22 21:16:15,976]\u001b[0m Trial 37 finished with value: 1915845450.4267912 and parameters: {'feature_fraction': 0.9799999999999999}. Best is trial 37 with value: 1915845450.4267912.\u001b[0m\n",
- "feature_fraction_stage2, val_score: 1870787631.458499: 33%|###3 | 2/6 [00:09<00:20, 5.02s/it]\u001b[32m[I 2021-08-22 21:16:20,814]\u001b[0m Trial 38 finished with value: 1870787631.4584987 and parameters: {'feature_fraction': 0.852}. Best is trial 38 with value: 1870787631.4584987.\u001b[0m\n",
- "feature_fraction_stage2, val_score: 1870787631.458499: 50%|##### | 3/6 [00:14<00:14, 4.95s/it]\u001b[32m[I 2021-08-22 21:16:25,594]\u001b[0m Trial 39 finished with value: 1870787631.4584987 and parameters: {'feature_fraction': 0.8839999999999999}. Best is trial 38 with value: 1870787631.4584987.\u001b[0m\n",
- "feature_fraction_stage2, val_score: 1870787631.458499: 67%|######6 | 4/6 [00:20<00:10, 5.08s/it]\u001b[32m[I 2021-08-22 21:16:30,990]\u001b[0m Trial 40 finished with value: 1915845450.4267912 and parameters: {'feature_fraction': 0.948}. Best is trial 38 with value: 1870787631.4584987.\u001b[0m\n",
- "feature_fraction_stage2, val_score: 1870787631.458499: 83%|########3 | 5/6 [00:25<00:05, 5.19s/it]\u001b[32m[I 2021-08-22 21:16:36,418]\u001b[0m Trial 41 finished with value: 1870787631.4584987 and parameters: {'feature_fraction': 0.9159999999999999}. Best is trial 38 with value: 1870787631.4584987.\u001b[0m\n",
- "feature_fraction_stage2, val_score: 1870787631.458499: 100%|##########| 6/6 [00:31<00:00, 5.39s/it]\u001b[32m[I 2021-08-22 21:16:42,282]\u001b[0m Trial 42 finished with value: 1870787631.4584987 and parameters: {'feature_fraction': 0.82}. Best is trial 38 with value: 1870787631.4584987.\u001b[0m\n",
- "feature_fraction_stage2, val_score: 1870787631.458499: 100%|##########| 6/6 [00:31<00:00, 5.24s/it]\n",
- "regularization_factors, val_score: 1870787631.458499: 5%|5 | 1/20 [00:05<01:41, 5.36s/it]\u001b[32m[I 2021-08-22 21:16:47,653]\u001b[0m Trial 43 finished with value: 1870787631.491234 and parameters: {'lambda_l1': 6.212193776886605e-06, 'lambda_l2': 3.009357838100163e-08}. Best is trial 43 with value: 1870787631.491234.\u001b[0m\n",
- "regularization_factors, val_score: 1870787534.973267: 10%|# | 2/20 [00:12<01:44, 5.82s/it]\u001b[32m[I 2021-08-22 21:16:54,524]\u001b[0m Trial 44 finished with value: 1870787534.9732666 and parameters: {'lambda_l1': 4.443479994016017e-08, 'lambda_l2': 3.556819404354524e-05}. Best is trial 44 with value: 1870787534.9732666.\u001b[0m\n",
- "regularization_factors, val_score: 1870787534.973267: 15%|#5 | 3/20 [00:17<01:36, 5.65s/it]\u001b[32m[I 2021-08-22 21:16:59,795]\u001b[0m Trial 45 finished with value: 1870787622.4979687 and parameters: {'lambda_l1': 0.014465195791714083, 'lambda_l2': 2.1021138174252987e-07}. Best is trial 44 with value: 1870787534.9732666.\u001b[0m\n",
- "regularization_factors, val_score: 1870787534.973267: 20%|## | 4/20 [00:22<01:28, 5.52s/it]\u001b[32m[I 2021-08-22 21:17:05,006]\u001b[0m Trial 46 finished with value: 1870787619.5326774 and parameters: {'lambda_l1': 1.429709851157171e-06, 'lambda_l2': 4.419238564285042e-06}. Best is trial 44 with value: 1870787534.9732666.\u001b[0m\n",
- "regularization_factors, val_score: 1870787534.973267: 25%|##5 | 5/20 [00:28<01:22, 5.51s/it]\u001b[32m[I 2021-08-22 21:17:10,496]\u001b[0m Trial 47 finished with value: 1870787590.2959824 and parameters: {'lambda_l1': 0.06926729801332972, 'lambda_l2': 1.6922599199508456e-08}. Best is trial 44 with value: 1870787534.9732666.\u001b[0m\n",
- "regularization_factors, val_score: 1870787534.973267: 30%|### | 6/20 [00:33<01:16, 5.44s/it]\u001b[32m[I 2021-08-22 21:17:15,768]\u001b[0m Trial 48 finished with value: 1870787630.8453631 and parameters: {'lambda_l1': 2.052098028013423e-08, 'lambda_l2': 2.3337138419589934e-07}. Best is trial 44 with value: 1870787534.9732666.\u001b[0m\n",
- "regularization_factors, val_score: 1870787534.973267: 35%|###5 | 7/20 [00:38<01:10, 5.39s/it]\u001b[32m[I 2021-08-22 21:17:21,046]\u001b[0m Trial 49 finished with value: 1876699891.9815595 and parameters: {'lambda_l1': 4.304944401004946e-06, 'lambda_l2': 0.0007656130560392606}. Best is trial 44 with value: 1870787534.9732666.\u001b[0m\n",
- "regularization_factors, val_score: 1870787534.973267: 40%|#### | 8/20 [00:44<01:04, 5.37s/it]\u001b[32m[I 2021-08-22 21:17:26,372]\u001b[0m Trial 50 finished with value: 1894915643.8553405 and parameters: {'lambda_l1': 5.783302142631901e-07, 'lambda_l2': 0.005857904967523283}. Best is trial 44 with value: 1870787534.9732666.\u001b[0m\n",
- "regularization_factors, val_score: 1870787534.973267: 45%|####5 | 9/20 [00:49<00:58, 5.32s/it]\u001b[32m[I 2021-08-22 21:17:31,570]\u001b[0m Trial 51 finished with value: 1877059069.215943 and parameters: {'lambda_l1': 2.9196198134708893, 'lambda_l2': 4.6093049397982125e-05}. Best is trial 44 with value: 1870787534.9732666.\u001b[0m\n",
- "regularization_factors, val_score: 1853015384.453637: 50%|##### | 10/20 [00:54<00:53, 5.34s/it]\u001b[32m[I 2021-08-22 21:17:36,944]\u001b[0m Trial 52 finished with value: 1853015384.453637 and parameters: {'lambda_l1': 0.09558504914610533, 'lambda_l2': 3.220273228431258}. Best is trial 52 with value: 1853015384.453637.\u001b[0m\n",
- "regularization_factors, val_score: 1853015384.453637: 55%|#####5 | 11/20 [01:00<00:49, 5.45s/it]\u001b[32m[I 2021-08-22 21:17:42,672]\u001b[0m Trial 53 finished with value: 1896992309.3706267 and parameters: {'lambda_l1': 6.575749289036579, 'lambda_l2': 7.662096538085835}. Best is trial 52 with value: 1853015384.453637.\u001b[0m\n",
- "regularization_factors, val_score: 1853015384.453637: 60%|###### | 12/20 [01:06<00:44, 5.56s/it]\u001b[32m[I 2021-08-22 21:17:48,473]\u001b[0m Trial 54 finished with value: 1893493622.3798478 and parameters: {'lambda_l1': 0.0008722383951977965, 'lambda_l2': 0.13339065517857865}. Best is trial 52 with value: 1853015384.453637.\u001b[0m\n",
- "regularization_factors, val_score: 1853015384.453637: 65%|######5 | 13/20 [01:12<00:39, 5.68s/it]\u001b[32m[I 2021-08-22 21:17:54,425]\u001b[0m Trial 55 finished with value: 1895459424.4650118 and parameters: {'lambda_l1': 0.046018652714269824, 'lambda_l2': 3.596577171855534}. Best is trial 52 with value: 1853015384.453637.\u001b[0m\n",
- "regularization_factors, val_score: 1853015384.453637: 70%|####### | 14/20 [01:17<00:33, 5.57s/it]\u001b[32m[I 2021-08-22 21:17:59,760]\u001b[0m Trial 56 finished with value: 1902235965.6523015 and parameters: {'lambda_l1': 0.0004372538444200538, 'lambda_l2': 0.018403234102680837}. Best is trial 52 with value: 1853015384.453637.\u001b[0m\n",
- "regularization_factors, val_score: 1853015384.453637: 75%|#######5 | 15/20 [01:23<00:27, 5.57s/it]\u001b[32m[I 2021-08-22 21:18:05,311]\u001b[0m Trial 57 finished with value: 1870787547.884662 and parameters: {'lambda_l1': 1.5644444125721077e-08, 'lambda_l2': 3.06684587723285e-05}. Best is trial 52 with value: 1853015384.453637.\u001b[0m\n",
- "regularization_factors, val_score: 1853015384.453637: 80%|######## | 16/20 [01:28<00:22, 5.56s/it]\u001b[32m[I 2021-08-22 21:18:10,860]\u001b[0m Trial 58 finished with value: 1952628057.3679152 and parameters: {'lambda_l1': 0.41090501100060367, 'lambda_l2': 0.4784149571785825}. Best is trial 52 with value: 1853015384.453637.\u001b[0m\n",
- "regularization_factors, val_score: 1853015384.453637: 85%|########5 | 17/20 [01:34<00:16, 5.66s/it]\u001b[32m[I 2021-08-22 21:18:16,748]\u001b[0m Trial 59 finished with value: 1874516545.805995 and parameters: {'lambda_l1': 4.63176126114126e-05, 'lambda_l2': 0.0002597320589400073}. Best is trial 52 with value: 1853015384.453637.\u001b[0m\n",
- "regularization_factors, val_score: 1853015384.453637: 90%|######### | 18/20 [01:40<00:11, 5.80s/it]\u001b[32m[I 2021-08-22 21:18:22,861]\u001b[0m Trial 60 finished with value: 1870787615.0912282 and parameters: {'lambda_l1': 0.007392465833323452, 'lambda_l2': 4.3888082066628725e-06}. Best is trial 52 with value: 1853015384.453637.\u001b[0m\n",
- "regularization_factors, val_score: 1853015384.453637: 95%|#########5| 19/20 [01:46<00:05, 5.72s/it]\u001b[32m[I 2021-08-22 21:18:28,403]\u001b[0m Trial 61 finished with value: 1859899631.3896043 and parameters: {'lambda_l1': 1.573671858757602e-07, 'lambda_l2': 0.0021763888923074476}. Best is trial 52 with value: 1853015384.453637.\u001b[0m\n",
- "regularization_factors, val_score: 1853015384.453637: 100%|##########| 20/20 [01:51<00:00, 5.70s/it]\u001b[32m[I 2021-08-22 21:18:34,066]\u001b[0m Trial 62 finished with value: 1905701773.7289548 and parameters: {'lambda_l1': 0.7977530430827201, 'lambda_l2': 0.6661855838737094}. Best is trial 52 with value: 1853015384.453637.\u001b[0m\n",
- "regularization_factors, val_score: 1853015384.453637: 100%|##########| 20/20 [01:51<00:00, 5.59s/it]\n",
- "min_data_in_leaf, val_score: 1853015384.453637: 20%|## | 1/5 [00:04<00:19, 4.95s/it]\u001b[32m[I 2021-08-22 21:18:39,027]\u001b[0m Trial 63 finished with value: 1859286747.0773554 and parameters: {'min_child_samples': 10}. Best is trial 63 with value: 1859286747.0773554.\u001b[0m\n",
- "min_data_in_leaf, val_score: 1853015384.453637: 40%|#### | 2/5 [00:09<00:14, 4.87s/it]\u001b[32m[I 2021-08-22 21:18:43,705]\u001b[0m Trial 64 finished with value: 1877906183.7743464 and parameters: {'min_child_samples': 5}. Best is trial 63 with value: 1859286747.0773554.\u001b[0m\n",
- "min_data_in_leaf, val_score: 1853015384.453637: 60%|###### | 3/5 [00:15<00:10, 5.15s/it]\u001b[32m[I 2021-08-22 21:18:49,514]\u001b[0m Trial 65 finished with value: 1996406986.7947733 and parameters: {'min_child_samples': 100}. Best is trial 63 with value: 1859286747.0773554.\u001b[0m\n",
- "min_data_in_leaf, val_score: 1853015384.453637: 80%|######## | 4/5 [00:22<00:05, 5.82s/it]\u001b[32m[I 2021-08-22 21:18:56,904]\u001b[0m Trial 66 finished with value: 1983678395.4383106 and parameters: {'min_child_samples': 50}. Best is trial 63 with value: 1859286747.0773554.\u001b[0m\n",
- "min_data_in_leaf, val_score: 1853015384.453637: 100%|##########| 5/5 [00:28<00:00, 5.92s/it]\u001b[32m[I 2021-08-22 21:19:03,042]\u001b[0m Trial 67 finished with value: 1906448776.6538603 and parameters: {'min_child_samples': 25}. Best is trial 63 with value: 1859286747.0773554.\u001b[0m\n",
- "min_data_in_leaf, val_score: 1853015384.453637: 100%|##########| 5/5 [00:28<00:00, 5.79s/it]"
+ "\u001b[32m[I 2021-09-29 23:14:13,542]\u001b[0m A new study created in memory with name: no-name-c5c149a5-8d21-451d-8907-5246d780db77\u001b[0m\n",
+ "feature_fraction, val_score: 2237193094.198954: 14%|#4 | 1/7 [00:01<00:09, 1.51s/it]\u001b[32m[I 2021-09-29 23:14:15,079]\u001b[0m Trial 0 finished with value: 2237193094.1989536 and parameters: {'feature_fraction': 0.4}. Best is trial 0 with value: 2237193094.1989536.\u001b[0m\n",
+ "feature_fraction, val_score: 1961388150.442425: 29%|##8 | 2/7 [00:03<00:07, 1.56s/it]\u001b[32m[I 2021-09-29 23:14:16,753]\u001b[0m Trial 1 finished with value: 1961388150.442425 and parameters: {'feature_fraction': 1.0}. Best is trial 1 with value: 1961388150.442425.\u001b[0m\n",
+ "feature_fraction, val_score: 1961388150.442425: 43%|####2 | 3/7 [00:04<00:06, 1.51s/it]\u001b[32m[I 2021-09-29 23:14:18,158]\u001b[0m Trial 2 finished with value: 1988198059.953293 and parameters: {'feature_fraction': 0.5}. Best is trial 1 with value: 1961388150.442425.\u001b[0m\n",
+ "feature_fraction, val_score: 1959693958.979498: 57%|#####7 | 4/7 [00:06<00:04, 1.52s/it]\u001b[32m[I 2021-09-29 23:14:19,698]\u001b[0m Trial 3 finished with value: 1959693958.979498 and parameters: {'feature_fraction': 0.8}. Best is trial 3 with value: 1959693958.979498.\u001b[0m\n",
+ "feature_fraction, val_score: 1923826918.442117: 71%|#######1 | 5/7 [00:07<00:03, 1.55s/it]\u001b[32m[I 2021-09-29 23:14:21,298]\u001b[0m Trial 4 finished with value: 1923826918.4421172 and parameters: {'feature_fraction': 0.8999999999999999}. Best is trial 4 with value: 1923826918.4421172.\u001b[0m\n",
+ "feature_fraction, val_score: 1923826918.442117: 86%|########5 | 6/7 [00:09<00:01, 1.54s/it]\u001b[32m[I 2021-09-29 23:14:22,813]\u001b[0m Trial 5 finished with value: 1959693958.979498 and parameters: {'feature_fraction': 0.7}. Best is trial 4 with value: 1923826918.4421172.\u001b[0m\n",
+ "feature_fraction, val_score: 1923826918.442117: 100%|##########| 7/7 [00:10<00:00, 1.50s/it]\u001b[32m[I 2021-09-29 23:14:24,238]\u001b[0m Trial 6 finished with value: 1935542284.5841475 and parameters: {'feature_fraction': 0.6}. Best is trial 4 with value: 1923826918.4421172.\u001b[0m\n",
+ "feature_fraction, val_score: 1923826918.442117: 100%|##########| 7/7 [00:10<00:00, 1.53s/it]\n",
+ "num_leaves, val_score: 1894793944.507313: 5%|5 | 1/20 [00:02<00:47, 2.51s/it]\u001b[32m[I 2021-09-29 23:14:26,757]\u001b[0m Trial 7 finished with value: 1894793944.5073128 and parameters: {'num_leaves': 63}. Best is trial 7 with value: 1894793944.5073128.\u001b[0m\n",
+ "num_leaves, val_score: 1894793944.507313: 10%|# | 2/20 [00:10<01:17, 4.28s/it]\u001b[32m[I 2021-09-29 23:14:35,165]\u001b[0m Trial 8 finished with value: 1927707783.0138607 and parameters: {'num_leaves': 231}. Best is trial 7 with value: 1894793944.5073128.\u001b[0m\n",
+ "num_leaves, val_score: 1894793944.507313: 15%|#5 | 3/20 [00:13<01:06, 3.90s/it]\u001b[32m[I 2021-09-29 23:14:38,165]\u001b[0m Trial 9 finished with value: 1898863555.537152 and parameters: {'num_leaves': 80}. Best is trial 7 with value: 1894793944.5073128.\u001b[0m\n",
+ "num_leaves, val_score: 1894793944.507313: 20%|## | 4/20 [00:20<01:13, 4.60s/it]\u001b[32m[I 2021-09-29 23:14:44,401]\u001b[0m Trial 10 finished with value: 1895572554.0884657 and parameters: {'num_leaves': 170}. Best is trial 7 with value: 1894793944.5073128.\u001b[0m\n",
+ "num_leaves, val_score: 1894793944.507313: 25%|##5 | 5/20 [00:27<01:23, 5.55s/it]\u001b[32m[I 2021-09-29 23:14:52,188]\u001b[0m Trial 11 finished with value: 1932937258.238013 and parameters: {'num_leaves': 221}. Best is trial 7 with value: 1894793944.5073128.\u001b[0m\n",
+ "num_leaves, val_score: 1881619881.755456: 30%|### | 6/20 [00:32<01:12, 5.19s/it]\u001b[32m[I 2021-09-29 23:14:56,509]\u001b[0m Trial 12 finished with value: 1881619881.7554562 and parameters: {'num_leaves': 116}. Best is trial 12 with value: 1881619881.7554562.\u001b[0m\n",
+ "num_leaves, val_score: 1881619881.755456: 35%|###5 | 7/20 [00:37<01:09, 5.33s/it]\u001b[32m[I 2021-09-29 23:15:02,188]\u001b[0m Trial 13 finished with value: 1896214256.4755397 and parameters: {'num_leaves': 149}. Best is trial 12 with value: 1881619881.7554562.\u001b[0m\n",
+ "num_leaves, val_score: 1881619881.755456: 40%|#### | 8/20 [00:46<01:16, 6.39s/it]\u001b[32m[I 2021-09-29 23:15:11,047]\u001b[0m Trial 14 finished with value: 1932801730.248241 and parameters: {'num_leaves': 249}. Best is trial 12 with value: 1881619881.7554562.\u001b[0m\n",
+ "num_leaves, val_score: 1874343820.223125: 45%|####5 | 9/20 [00:50<01:00, 5.49s/it]\u001b[32m[I 2021-09-29 23:15:14,420]\u001b[0m Trial 15 finished with value: 1874343820.2231247 and parameters: {'num_leaves': 90}. Best is trial 15 with value: 1874343820.2231247.\u001b[0m\n",
+ "num_leaves, val_score: 1874343820.223125: 50%|##### | 10/20 [00:51<00:43, 4.33s/it]\u001b[32m[I 2021-09-29 23:15:16,039]\u001b[0m Trial 16 finished with value: 1953979611.4522753 and parameters: {'num_leaves': 33}. Best is trial 15 with value: 1874343820.2231247.\u001b[0m\n",
+ "num_leaves, val_score: 1874343820.223125: 55%|#####5 | 11/20 [00:52<00:29, 3.24s/it]\u001b[32m[I 2021-09-29 23:15:16,762]\u001b[0m Trial 17 finished with value: 2096126435.5363083 and parameters: {'num_leaves': 6}. Best is trial 15 with value: 1874343820.2231247.\u001b[0m\n",
+ "num_leaves, val_score: 1874343820.223125: 60%|###### | 12/20 [00:56<00:28, 3.60s/it]\u001b[32m[I 2021-09-29 23:15:21,192]\u001b[0m Trial 18 finished with value: 1906288467.0368485 and parameters: {'num_leaves': 104}. Best is trial 15 with value: 1874343820.2231247.\u001b[0m\n",
+ "num_leaves, val_score: 1868351924.901373: 65%|######5 | 13/20 [01:02<00:28, 4.06s/it]\u001b[32m[I 2021-09-29 23:15:26,319]\u001b[0m Trial 19 finished with value: 1868351924.901373 and parameters: {'num_leaves': 123}. Best is trial 19 with value: 1868351924.901373.\u001b[0m\n",
+ "num_leaves, val_score: 1868351924.901373: 70%|####### | 14/20 [01:10<00:31, 5.25s/it]\u001b[32m[I 2021-09-29 23:15:34,344]\u001b[0m Trial 20 finished with value: 1914548906.8882365 and parameters: {'num_leaves': 182}. Best is trial 19 with value: 1868351924.901373.\u001b[0m\n",
+ "num_leaves, val_score: 1868351924.901373: 75%|#######5 | 15/20 [01:13<00:23, 4.62s/it]\u001b[32m[I 2021-09-29 23:15:37,498]\u001b[0m Trial 21 finished with value: 1893247951.8046007 and parameters: {'num_leaves': 66}. Best is trial 19 with value: 1868351924.901373.\u001b[0m\n",
+ "num_leaves, val_score: 1868351924.901373: 80%|######## | 16/20 [01:19<00:20, 5.14s/it]\u001b[32m[I 2021-09-29 23:15:43,859]\u001b[0m Trial 22 finished with value: 1869033871.381424 and parameters: {'num_leaves': 137}. Best is trial 19 with value: 1868351924.901373.\u001b[0m\n",
+ "num_leaves, val_score: 1868351924.901373: 85%|########5 | 17/20 [01:26<00:16, 5.59s/it]\u001b[32m[I 2021-09-29 23:15:50,493]\u001b[0m Trial 23 finished with value: 1906428309.75546 and parameters: {'num_leaves': 139}. Best is trial 19 with value: 1868351924.901373.\u001b[0m\n",
+ "num_leaves, val_score: 1868351924.901373: 90%|######### | 18/20 [01:33<00:12, 6.08s/it]\u001b[32m[I 2021-09-29 23:15:57,732]\u001b[0m Trial 24 finished with value: 1908820539.6759791 and parameters: {'num_leaves': 190}. Best is trial 19 with value: 1868351924.901373.\u001b[0m\n",
+ "num_leaves, val_score: 1863498501.613274: 95%|#########5| 19/20 [01:38<00:05, 5.89s/it]\u001b[32m[I 2021-09-29 23:16:03,159]\u001b[0m Trial 25 finished with value: 1863498501.6132743 and parameters: {'num_leaves': 132}. Best is trial 25 with value: 1863498501.6132743.\u001b[0m\n",
+ "num_leaves, val_score: 1863498501.613274: 100%|##########| 20/20 [01:45<00:00, 5.98s/it]\u001b[32m[I 2021-09-29 23:16:09,353]\u001b[0m Trial 26 finished with value: 1931430333.393386 and parameters: {'num_leaves': 163}. Best is trial 25 with value: 1863498501.6132743.\u001b[0m\n",
+ "num_leaves, val_score: 1863498501.613274: 100%|##########| 20/20 [01:45<00:00, 5.26s/it]\n",
+ "bagging, val_score: 1863498501.613274: 10%|# | 1/10 [00:06<00:54, 6.06s/it]\u001b[32m[I 2021-09-29 23:16:15,418]\u001b[0m Trial 27 finished with value: 1972994781.1627002 and parameters: {'bagging_fraction': 0.8189909368436051, 'bagging_freq': 5}. Best is trial 27 with value: 1972994781.1627002.\u001b[0m\n",
+ "bagging, val_score: 1863498501.613274: 20%|## | 2/10 [00:11<00:48, 6.01s/it]\u001b[32m[I 2021-09-29 23:16:21,308]\u001b[0m Trial 28 finished with value: 2049496930.4993417 and parameters: {'bagging_fraction': 0.7873861289934212, 'bagging_freq': 3}. Best is trial 27 with value: 1972994781.1627002.\u001b[0m\n",
+ "bagging, val_score: 1863498501.613274: 30%|### | 3/10 [00:18<00:42, 6.12s/it]\u001b[32m[I 2021-09-29 23:16:27,693]\u001b[0m Trial 29 finished with value: 2139768933.1393518 and parameters: {'bagging_fraction': 0.4388276153024021, 'bagging_freq': 2}. Best is trial 27 with value: 1972994781.1627002.\u001b[0m\n",
+ "bagging, val_score: 1863498501.613274: 40%|#### | 4/10 [00:24<00:37, 6.28s/it]\u001b[32m[I 2021-09-29 23:16:34,335]\u001b[0m Trial 30 finished with value: 2158142780.1248493 and parameters: {'bagging_fraction': 0.4239288962558818, 'bagging_freq': 2}. Best is trial 27 with value: 1972994781.1627002.\u001b[0m\n",
+ "bagging, val_score: 1863498501.613274: 50%|##### | 5/10 [00:31<00:31, 6.22s/it]\u001b[32m[I 2021-09-29 23:16:40,435]\u001b[0m Trial 31 finished with value: 2010657702.029828 and parameters: {'bagging_fraction': 0.715314699361536, 'bagging_freq': 6}. Best is trial 27 with value: 1972994781.1627002.\u001b[0m\n",
+ "bagging, val_score: 1863498501.613274: 60%|###### | 6/10 [00:36<00:24, 6.06s/it]\u001b[32m[I 2021-09-29 23:16:46,125]\u001b[0m Trial 32 finished with value: 1964933494.828049 and parameters: {'bagging_fraction': 0.7738253951287195, 'bagging_freq': 3}. Best is trial 32 with value: 1964933494.828049.\u001b[0m\n",
+ "bagging, val_score: 1863498501.613274: 70%|####### | 7/10 [00:42<00:18, 6.01s/it]\u001b[32m[I 2021-09-29 23:16:52,023]\u001b[0m Trial 33 finished with value: 2026997372.947912 and parameters: {'bagging_fraction': 0.745596764852547, 'bagging_freq': 5}. Best is trial 32 with value: 1964933494.828049.\u001b[0m\n",
+ "bagging, val_score: 1863498501.613274: 80%|######## | 8/10 [00:47<00:11, 5.65s/it]\u001b[32m[I 2021-09-29 23:16:56,834]\u001b[0m Trial 34 finished with value: 1906233781.152753 and parameters: {'bagging_fraction': 0.9945003931775, 'bagging_freq': 1}. Best is trial 34 with value: 1906233781.152753.\u001b[0m\n",
+ "bagging, val_score: 1863498501.613274: 90%|######### | 9/10 [00:53<00:05, 5.85s/it]\u001b[32m[I 2021-09-29 23:17:03,142]\u001b[0m Trial 35 finished with value: 1977288663.8965576 and parameters: {'bagging_fraction': 0.7683884472431496, 'bagging_freq': 5}. Best is trial 34 with value: 1906233781.152753.\u001b[0m\n",
+ "bagging, val_score: 1863498501.613274: 100%|##########| 10/10 [00:59<00:00, 5.92s/it]\u001b[32m[I 2021-09-29 23:17:09,223]\u001b[0m Trial 36 finished with value: 2070275360.3552403 and parameters: {'bagging_fraction': 0.6452285190521252, 'bagging_freq': 2}. Best is trial 34 with value: 1906233781.152753.\u001b[0m\n",
+ "bagging, val_score: 1863498501.613274: 100%|##########| 10/10 [00:59<00:00, 5.99s/it]\n",
+ "feature_fraction_stage2, val_score: 1863498501.613274: 17%|#6 | 1/6 [00:04<00:22, 4.59s/it]\u001b[32m[I 2021-09-29 23:17:13,824]\u001b[0m Trial 37 finished with value: 1863498501.6132743 and parameters: {'feature_fraction': 0.852}. Best is trial 37 with value: 1863498501.6132743.\u001b[0m\n",
+ "feature_fraction_stage2, val_score: 1863498501.613274: 33%|###3 | 2/6 [00:09<00:18, 4.72s/it]\u001b[32m[I 2021-09-29 23:17:18,825]\u001b[0m Trial 38 finished with value: 1863498501.6132743 and parameters: {'feature_fraction': 0.8839999999999999}. Best is trial 37 with value: 1863498501.6132743.\u001b[0m\n",
+ "feature_fraction_stage2, val_score: 1863498501.613274: 50%|##### | 3/6 [00:14<00:14, 4.69s/it]\u001b[32m[I 2021-09-29 23:17:23,441]\u001b[0m Trial 39 finished with value: 1863498501.6132743 and parameters: {'feature_fraction': 0.9159999999999999}. Best is trial 37 with value: 1863498501.6132743.\u001b[0m\n",
+ "feature_fraction_stage2, val_score: 1863498501.613274: 67%|######6 | 4/6 [00:18<00:09, 4.69s/it]\u001b[32m[I 2021-09-29 23:17:28,139]\u001b[0m Trial 40 finished with value: 1950431559.883238 and parameters: {'feature_fraction': 0.948}. Best is trial 37 with value: 1863498501.6132743.\u001b[0m\n",
+ "feature_fraction_stage2, val_score: 1863498501.613274: 83%|########3 | 5/6 [00:23<00:04, 4.74s/it]\u001b[32m[I 2021-09-29 23:17:33,002]\u001b[0m Trial 41 finished with value: 1863498501.6132743 and parameters: {'feature_fraction': 0.82}. Best is trial 37 with value: 1863498501.6132743.\u001b[0m\n",
+ "feature_fraction_stage2, val_score: 1863498501.613274: 100%|##########| 6/6 [00:28<00:00, 4.73s/it]\u001b[32m[I 2021-09-29 23:17:37,694]\u001b[0m Trial 42 finished with value: 1950431559.883238 and parameters: {'feature_fraction': 0.9799999999999999}. Best is trial 37 with value: 1863498501.6132743.\u001b[0m\n",
+ "feature_fraction_stage2, val_score: 1863498501.613274: 100%|##########| 6/6 [00:28<00:00, 4.74s/it]\n",
+ "regularization_factors, val_score: 1863498501.613274: 5%|5 | 1/20 [00:04<01:30, 4.76s/it]\u001b[32m[I 2021-09-29 23:17:42,464]\u001b[0m Trial 43 finished with value: 1900519550.3985364 and parameters: {'lambda_l1': 1.724493294436389e-07, 'lambda_l2': 0.0011802180675328437}. Best is trial 43 with value: 1900519550.3985364.\u001b[0m\n",
+ "regularization_factors, val_score: 1863498501.613274: 10%|# | 2/20 [00:09<01:25, 4.76s/it]\u001b[32m[I 2021-09-29 23:17:47,207]\u001b[0m Trial 44 finished with value: 1897124810.0132961 and parameters: {'lambda_l1': 0.015497205121979793, 'lambda_l2': 0.00015637359721725182}. Best is trial 44 with value: 1897124810.0132961.\u001b[0m\n",
+ "regularization_factors, val_score: 1863498501.613274: 15%|#5 | 3/20 [00:14<01:21, 4.77s/it]\u001b[32m[I 2021-09-29 23:17:51,995]\u001b[0m Trial 45 finished with value: 1891252495.3075037 and parameters: {'lambda_l1': 9.16513906590219e-07, 'lambda_l2': 0.026000627128814197}. Best is trial 45 with value: 1891252495.3075037.\u001b[0m\n",
+ "regularization_factors, val_score: 1863498500.800182: 20%|## | 4/20 [00:19<01:16, 4.77s/it]\u001b[32m[I 2021-09-29 23:17:56,768]\u001b[0m Trial 46 finished with value: 1863498500.8001816 and parameters: {'lambda_l1': 0.0011712453303529063, 'lambda_l2': 6.256511928312648e-08}. Best is trial 46 with value: 1863498500.8001816.\u001b[0m\n",
+ "regularization_factors, val_score: 1863498470.597524: 25%|##5 | 5/20 [00:24<01:12, 4.84s/it]\u001b[32m[I 2021-09-29 23:18:01,762]\u001b[0m Trial 47 finished with value: 1863498470.5975237 and parameters: {'lambda_l1': 0.053923251257381184, 'lambda_l2': 6.016404526705668e-08}. Best is trial 47 with value: 1863498470.5975237.\u001b[0m\n",
+ "regularization_factors, val_score: 1863498470.597524: 30%|### | 6/20 [00:28<01:07, 4.82s/it]\u001b[32m[I 2021-09-29 23:18:06,535]\u001b[0m Trial 48 finished with value: 1863498500.6828027 and parameters: {'lambda_l1': 6.008912045555043e-05, 'lambda_l2': 3.5014865430650066e-07}. Best is trial 47 with value: 1863498470.5975237.\u001b[0m\n",
+ "regularization_factors, val_score: 1863498144.156982: 35%|###5 | 7/20 [00:34<01:04, 4.96s/it]\u001b[32m[I 2021-09-29 23:18:11,824]\u001b[0m Trial 49 finished with value: 1863498144.1569824 and parameters: {'lambda_l1': 0.2873876088430125, 'lambda_l2': 6.275345656705353e-05}. Best is trial 49 with value: 1863498144.1569824.\u001b[0m\n",
+ "regularization_factors, val_score: 1829411923.127120: 40%|#### | 8/20 [00:39<00:59, 4.96s/it]\u001b[32m[I 2021-09-29 23:18:16,784]\u001b[0m Trial 50 finished with value: 1829411923.1271203 and parameters: {'lambda_l1': 0.014530100017980814, 'lambda_l2': 0.013764223496447852}. Best is trial 50 with value: 1829411923.1271203.\u001b[0m\n",
+ "regularization_factors, val_score: 1829411923.127120: 45%|####5 | 9/20 [00:44<00:54, 4.95s/it]\u001b[32m[I 2021-09-29 23:18:21,714]\u001b[0m Trial 51 finished with value: 1908686079.6273117 and parameters: {'lambda_l1': 4.049940611761022e-08, 'lambda_l2': 0.00040034467394794076}. Best is trial 50 with value: 1829411923.1271203.\u001b[0m\n",
+ "regularization_factors, val_score: 1829411923.127120: 50%|##### | 10/20 [00:49<00:49, 4.99s/it]\u001b[32m[I 2021-09-29 23:18:26,790]\u001b[0m Trial 52 finished with value: 1885012218.0004046 and parameters: {'lambda_l1': 0.0002200189403526506, 'lambda_l2': 0.20302349889094043}. Best is trial 50 with value: 1829411923.1271203.\u001b[0m\n",
+ "regularization_factors, val_score: 1829411923.127120: 55%|#####5 | 11/20 [00:54<00:45, 5.03s/it]\u001b[32m[I 2021-09-29 23:18:31,926]\u001b[0m Trial 53 finished with value: 1906625061.3932586 and parameters: {'lambda_l1': 5.510584147431707, 'lambda_l2': 1.5865265278948395}. Best is trial 50 with value: 1829411923.1271203.\u001b[0m\n",
+ "regularization_factors, val_score: 1829411923.127120: 60%|###### | 12/20 [00:59<00:41, 5.13s/it]\u001b[32m[I 2021-09-29 23:18:37,269]\u001b[0m Trial 54 finished with value: 1913356940.3398488 and parameters: {'lambda_l1': 7.919177639141472, 'lambda_l2': 7.0464807271647355e-06}. Best is trial 50 with value: 1829411923.1271203.\u001b[0m\n",
+ "regularization_factors, val_score: 1829411923.127120: 65%|######5 | 13/20 [01:04<00:36, 5.20s/it]\u001b[32m[I 2021-09-29 23:18:42,634]\u001b[0m Trial 55 finished with value: 1943493473.7805178 and parameters: {'lambda_l1': 0.2709128839307005, 'lambda_l2': 0.008152038140390653}. Best is trial 50 with value: 1829411923.1271203.\u001b[0m\n",
+ "regularization_factors, val_score: 1829411923.127120: 70%|####### | 14/20 [01:10<00:31, 5.18s/it]\u001b[32m[I 2021-09-29 23:18:47,759]\u001b[0m Trial 56 finished with value: 1869993775.185964 and parameters: {'lambda_l1': 0.6252702501891693, 'lambda_l2': 1.1976465197068111e-05}. Best is trial 50 with value: 1829411923.1271203.\u001b[0m\n",
+ "regularization_factors, val_score: 1829411923.127120: 75%|#######5 | 15/20 [01:14<00:25, 5.04s/it]\u001b[32m[I 2021-09-29 23:18:52,482]\u001b[0m Trial 57 finished with value: 1863498474.3189669 and parameters: {'lambda_l1': 0.0031261290560461548, 'lambda_l2': 8.158381653124474e-06}. Best is trial 50 with value: 1829411923.1271203.\u001b[0m\n",
+ "regularization_factors, val_score: 1829411923.127120: 80%|######## | 16/20 [01:19<00:19, 4.95s/it]\u001b[32m[I 2021-09-29 23:18:57,236]\u001b[0m Trial 58 finished with value: 1906922013.1929636 and parameters: {'lambda_l1': 5.0437093419934566e-06, 'lambda_l2': 8.510701285476182}. Best is trial 50 with value: 1829411923.1271203.\u001b[0m\n",
+ "regularization_factors, val_score: 1829411923.127120: 85%|########5 | 17/20 [01:25<00:15, 5.17s/it]\u001b[32m[I 2021-09-29 23:19:02,913]\u001b[0m Trial 59 finished with value: 1941548787.4363914 and parameters: {'lambda_l1': 0.489782747632979, 'lambda_l2': 0.010063061940333582}. Best is trial 50 with value: 1829411923.1271203.\u001b[0m\n",
+ "regularization_factors, val_score: 1829411923.127120: 90%|######### | 18/20 [01:30<00:10, 5.17s/it]\u001b[32m[I 2021-09-29 23:19:08,065]\u001b[0m Trial 60 finished with value: 1922251238.1017878 and parameters: {'lambda_l1': 0.014630309366687438, 'lambda_l2': 0.20905649983873378}. Best is trial 50 with value: 1829411923.1271203.\u001b[0m\n",
+ "regularization_factors, val_score: 1829411923.127120: 95%|#########5| 19/20 [01:35<00:05, 5.09s/it]\u001b[32m[I 2021-09-29 23:19:12,966]\u001b[0m Trial 61 finished with value: 1907187734.8434992 and parameters: {'lambda_l1': 2.908849929024249, 'lambda_l2': 6.175780052432868e-05}. Best is trial 50 with value: 1829411923.1271203.\u001b[0m\n",
+ "regularization_factors, val_score: 1829411923.127120: 100%|##########| 20/20 [01:40<00:00, 5.08s/it]\u001b[32m[I 2021-09-29 23:19:18,016]\u001b[0m Trial 62 finished with value: 1887887228.6365638 and parameters: {'lambda_l1': 0.08176402124056348, 'lambda_l2': 0.0022386651346889396}. Best is trial 50 with value: 1829411923.1271203.\u001b[0m\n",
+ "regularization_factors, val_score: 1829411923.127120: 100%|##########| 20/20 [01:40<00:00, 5.02s/it]\n",
+ "min_data_in_leaf, val_score: 1829411923.127120: 20%|## | 1/5 [00:05<00:21, 5.27s/it]\u001b[32m[I 2021-09-29 23:19:23,298]\u001b[0m Trial 63 finished with value: 1887743370.2363958 and parameters: {'min_child_samples': 25}. Best is trial 63 with value: 1887743370.2363958.\u001b[0m\n",
+ "min_data_in_leaf, val_score: 1829411923.127120: 40%|#### | 2/5 [00:11<00:16, 5.60s/it]\u001b[32m[I 2021-09-29 23:19:29,669]\u001b[0m Trial 64 finished with value: 1988738364.2495584 and parameters: {'min_child_samples': 100}. Best is trial 63 with value: 1887743370.2363958.\u001b[0m\n",
+ "min_data_in_leaf, val_score: 1829411923.127120: 60%|###### | 3/5 [00:16<00:10, 5.32s/it]\u001b[32m[I 2021-09-29 23:19:34,327]\u001b[0m Trial 65 finished with value: 1872759368.4778924 and parameters: {'min_child_samples': 10}. Best is trial 65 with value: 1872759368.4778924.\u001b[0m\n",
+ "min_data_in_leaf, val_score: 1829411923.127120: 80%|######## | 4/5 [00:20<00:04, 4.98s/it]\u001b[32m[I 2021-09-29 23:19:38,517]\u001b[0m Trial 66 finished with value: 1866723989.0731173 and parameters: {'min_child_samples': 5}. Best is trial 66 with value: 1866723989.0731173.\u001b[0m\n",
+ "min_data_in_leaf, val_score: 1829411923.127120: 100%|##########| 5/5 [00:27<00:00, 5.62s/it]\u001b[32m[I 2021-09-29 23:19:45,625]\u001b[0m Trial 67 finished with value: 1965123941.88074 and parameters: {'min_child_samples': 50}. Best is trial 66 with value: 1866723989.0731173.\u001b[0m\n",
+ "min_data_in_leaf, val_score: 1829411923.127120: 100%|##########| 5/5 [00:27<00:00, 5.52s/it]"
]
},
{
"output_type": "stream",
"name": "stdout",
"text": [
- "CPU times: user 5min 24s, sys: 17.3 s, total: 5min 41s\n",
- "Wall time: 5min 42s\n"
+ "CPU times: user 5min 14s, sys: 16.8 s, total: 5min 31s\n",
+ "Wall time: 5min 32s\n"
]
},
{
@@ -759,7 +719,7 @@
"output_type": "stream",
"name": "stdout",
"text": [
- "Optuna LightGBM Tuner r2 = 0.8428464421292586\n"
+ "Optuna LightGBM Tuner r2 = 0.8444445782478855\n"
]
}
],
@@ -786,7 +746,6 @@
"cell_type": "code",
"execution_count": 21,
"source": [
- "\n",
"import numpy as np \n",
"\n",
"''' define your customized objective function '''\n",
@@ -817,8 +776,8 @@
" '''LGBMEstimator with my_loss_obj as the objective function\n",
" '''\n",
"\n",
- " def __init__(self, **params):\n",
- " super().__init__(objective=my_loss_obj, **params)"
+ " def __init__(self, **config):\n",
+ " super().__init__(objective=my_loss_obj, **config)"
],
"outputs": [],
"metadata": {}
@@ -850,106 +809,99 @@
"output_type": "stream",
"name": "stderr",
"text": [
- "[flaml.automl: 08-22 21:19:04] {1130} INFO - Evaluation method: cv\n",
- "[flaml.automl: 08-22 21:19:04] {634} INFO - Using RepeatedKFold\n",
- "[flaml.automl: 08-22 21:19:04] {1155} INFO - Minimizing error metric: 1-r2\n",
- "[flaml.automl: 08-22 21:19:04] {1175} INFO - List of ML learners in AutoML Run: ['my_lgbm']\n",
- "[flaml.automl: 08-22 21:19:04] {1358} INFO - iteration 0, current learner my_lgbm\n",
- "[flaml.automl: 08-22 21:19:04] {1515} INFO - at 0.2s,\tbest my_lgbm's error=2.9888,\tbest my_lgbm's error=2.9888\n",
- "[flaml.automl: 08-22 21:19:04] {1358} INFO - iteration 1, current learner my_lgbm\n",
- "[flaml.automl: 08-22 21:19:04] {1515} INFO - at 0.3s,\tbest my_lgbm's error=2.9888,\tbest my_lgbm's error=2.9888\n",
- "[flaml.automl: 08-22 21:19:04] {1358} INFO - iteration 2, current learner my_lgbm\n",
- "[flaml.automl: 08-22 21:19:04] {1515} INFO - at 0.5s,\tbest my_lgbm's error=1.7087,\tbest my_lgbm's error=1.7087\n",
- "[flaml.automl: 08-22 21:19:04] {1358} INFO - iteration 3, current learner my_lgbm\n",
- "[flaml.automl: 08-22 21:19:05] {1515} INFO - at 0.6s,\tbest my_lgbm's error=0.3465,\tbest my_lgbm's error=0.3465\n",
- "[flaml.automl: 08-22 21:19:05] {1358} INFO - iteration 4, current learner my_lgbm\n",
- "[flaml.automl: 08-22 21:19:05] {1515} INFO - at 0.8s,\tbest my_lgbm's error=0.3465,\tbest my_lgbm's error=0.3465\n",
- "[flaml.automl: 08-22 21:19:05] {1358} INFO - iteration 5, current learner my_lgbm\n",
- "[flaml.automl: 08-22 21:19:05] {1515} INFO - at 1.0s,\tbest my_lgbm's error=0.3005,\tbest my_lgbm's error=0.3005\n",
- "[flaml.automl: 08-22 21:19:05] {1358} INFO - iteration 6, current learner my_lgbm\n",
- "[flaml.automl: 08-22 21:19:05] {1515} INFO - at 1.2s,\tbest my_lgbm's error=0.3005,\tbest my_lgbm's error=0.3005\n",
- "[flaml.automl: 08-22 21:19:05] {1358} INFO - iteration 7, current learner my_lgbm\n",
- "[flaml.automl: 08-22 21:19:05] {1515} INFO - at 1.3s,\tbest my_lgbm's error=0.3005,\tbest my_lgbm's error=0.3005\n",
- "[flaml.automl: 08-22 21:19:05] {1358} INFO - iteration 8, current learner my_lgbm\n",
- "[flaml.automl: 08-22 21:19:06] {1515} INFO - at 1.6s,\tbest my_lgbm's error=0.2709,\tbest my_lgbm's error=0.2709\n",
- "[flaml.automl: 08-22 21:19:06] {1358} INFO - iteration 9, current learner my_lgbm\n",
- "[flaml.automl: 08-22 21:19:06] {1515} INFO - at 1.8s,\tbest my_lgbm's error=0.2709,\tbest my_lgbm's error=0.2709\n",
- "[flaml.automl: 08-22 21:19:06] {1358} INFO - iteration 10, current learner my_lgbm\n",
- "[flaml.automl: 08-22 21:19:07] {1515} INFO - at 2.8s,\tbest my_lgbm's error=0.1852,\tbest my_lgbm's error=0.1852\n",
- "[flaml.automl: 08-22 21:19:07] {1358} INFO - iteration 11, current learner my_lgbm\n",
- "[flaml.automl: 08-22 21:19:08] {1515} INFO - at 3.9s,\tbest my_lgbm's error=0.1852,\tbest my_lgbm's error=0.1852\n",
- "[flaml.automl: 08-22 21:19:08] {1358} INFO - iteration 12, current learner my_lgbm\n",
- "[flaml.automl: 08-22 21:19:09] {1515} INFO - at 4.7s,\tbest my_lgbm's error=0.1852,\tbest my_lgbm's error=0.1852\n",
- "[flaml.automl: 08-22 21:19:09] {1358} INFO - iteration 13, current learner my_lgbm\n",
- "[flaml.automl: 08-22 21:19:10] {1515} INFO - at 5.7s,\tbest my_lgbm's error=0.1852,\tbest my_lgbm's error=0.1852\n",
- "[flaml.automl: 08-22 21:19:10] {1358} INFO - iteration 14, current learner my_lgbm\n",
- "[flaml.automl: 08-22 21:19:11] {1515} INFO - at 6.8s,\tbest my_lgbm's error=0.1852,\tbest my_lgbm's error=0.1852\n",
- "[flaml.automl: 08-22 21:19:11] {1358} INFO - iteration 15, current learner my_lgbm\n",
- "[flaml.automl: 08-22 21:19:12] {1515} INFO - at 8.1s,\tbest my_lgbm's error=0.1804,\tbest my_lgbm's error=0.1804\n",
- "[flaml.automl: 08-22 21:19:12] {1358} INFO - iteration 16, current learner my_lgbm\n",
- "[flaml.automl: 08-22 21:19:13] {1515} INFO - at 9.0s,\tbest my_lgbm's error=0.1804,\tbest my_lgbm's error=0.1804\n",
- "[flaml.automl: 08-22 21:19:13] {1358} INFO - iteration 17, current learner my_lgbm\n",
- "[flaml.automl: 08-22 21:19:17] {1515} INFO - at 12.9s,\tbest my_lgbm's error=0.1777,\tbest my_lgbm's error=0.1777\n",
- "[flaml.automl: 08-22 21:19:17] {1358} INFO - iteration 18, current learner my_lgbm\n",
- "[flaml.automl: 08-22 21:19:18] {1515} INFO - at 14.3s,\tbest my_lgbm's error=0.1777,\tbest my_lgbm's error=0.1777\n",
- "[flaml.automl: 08-22 21:19:18] {1358} INFO - iteration 19, current learner my_lgbm\n",
- "[flaml.automl: 08-22 21:19:20] {1515} INFO - at 15.7s,\tbest my_lgbm's error=0.1777,\tbest my_lgbm's error=0.1777\n",
- "[flaml.automl: 08-22 21:19:20] {1358} INFO - iteration 20, current learner my_lgbm\n",
- "[flaml.automl: 08-22 21:19:28] {1515} INFO - at 24.1s,\tbest my_lgbm's error=0.1777,\tbest my_lgbm's error=0.1777\n",
- "[flaml.automl: 08-22 21:19:28] {1358} INFO - iteration 21, current learner my_lgbm\n",
- "[flaml.automl: 08-22 21:19:32] {1515} INFO - at 27.9s,\tbest my_lgbm's error=0.1777,\tbest my_lgbm's error=0.1777\n",
- "[flaml.automl: 08-22 21:19:32] {1358} INFO - iteration 22, current learner my_lgbm\n",
- "[flaml.automl: 08-22 21:19:34] {1515} INFO - at 30.4s,\tbest my_lgbm's error=0.1715,\tbest my_lgbm's error=0.1715\n",
- "[flaml.automl: 08-22 21:19:34] {1358} INFO - iteration 23, current learner my_lgbm\n",
- "[flaml.automl: 08-22 21:19:36] {1515} INFO - at 32.4s,\tbest my_lgbm's error=0.1715,\tbest my_lgbm's error=0.1715\n",
- "[flaml.automl: 08-22 21:19:36] {1358} INFO - iteration 24, current learner my_lgbm\n",
- "[flaml.automl: 08-22 21:19:41] {1515} INFO - at 36.9s,\tbest my_lgbm's error=0.1715,\tbest my_lgbm's error=0.1715\n",
- "[flaml.automl: 08-22 21:19:41] {1358} INFO - iteration 25, current learner my_lgbm\n",
- "[flaml.automl: 08-22 21:19:51] {1515} INFO - at 47.4s,\tbest my_lgbm's error=0.1715,\tbest my_lgbm's error=0.1715\n",
- "[flaml.automl: 08-22 21:19:51] {1358} INFO - iteration 26, current learner my_lgbm\n",
- "[flaml.automl: 08-22 21:19:52] {1515} INFO - at 48.3s,\tbest my_lgbm's error=0.1715,\tbest my_lgbm's error=0.1715\n",
- "[flaml.automl: 08-22 21:19:52] {1358} INFO - iteration 27, current learner my_lgbm\n",
- "[flaml.automl: 08-22 21:19:59] {1515} INFO - at 55.1s,\tbest my_lgbm's error=0.1715,\tbest my_lgbm's error=0.1715\n",
- "[flaml.automl: 08-22 21:19:59] {1358} INFO - iteration 28, current learner my_lgbm\n",
- "[flaml.automl: 08-22 21:20:00] {1515} INFO - at 56.3s,\tbest my_lgbm's error=0.1715,\tbest my_lgbm's error=0.1715\n",
- "[flaml.automl: 08-22 21:20:00] {1358} INFO - iteration 29, current learner my_lgbm\n",
- "[flaml.automl: 08-22 21:20:01] {1515} INFO - at 57.0s,\tbest my_lgbm's error=0.1715,\tbest my_lgbm's error=0.1715\n",
- "[flaml.automl: 08-22 21:20:01] {1358} INFO - iteration 30, current learner my_lgbm\n",
- "[flaml.automl: 08-22 21:20:16] {1515} INFO - at 72.2s,\tbest my_lgbm's error=0.1715,\tbest my_lgbm's error=0.1715\n",
- "[flaml.automl: 08-22 21:20:16] {1358} INFO - iteration 31, current learner my_lgbm\n",
- "[flaml.automl: 08-22 21:20:18] {1515} INFO - at 74.1s,\tbest my_lgbm's error=0.1715,\tbest my_lgbm's error=0.1715\n",
- "[flaml.automl: 08-22 21:20:18] {1358} INFO - iteration 32, current learner my_lgbm\n",
- "[flaml.automl: 08-22 21:20:22] {1515} INFO - at 78.3s,\tbest my_lgbm's error=0.1669,\tbest my_lgbm's error=0.1669\n",
- "[flaml.automl: 08-22 21:20:22] {1358} INFO - iteration 33, current learner my_lgbm\n",
- "[flaml.automl: 08-22 21:20:26] {1515} INFO - at 82.0s,\tbest my_lgbm's error=0.1669,\tbest my_lgbm's error=0.1669\n",
- "[flaml.automl: 08-22 21:20:26] {1358} INFO - iteration 34, current learner my_lgbm\n",
- "[flaml.automl: 08-22 21:20:29] {1515} INFO - at 84.6s,\tbest my_lgbm's error=0.1669,\tbest my_lgbm's error=0.1669\n",
- "[flaml.automl: 08-22 21:20:29] {1358} INFO - iteration 35, current learner my_lgbm\n",
- "[flaml.automl: 08-22 21:20:46] {1515} INFO - at 101.8s,\tbest my_lgbm's error=0.1669,\tbest my_lgbm's error=0.1669\n",
- "[flaml.automl: 08-22 21:20:46] {1358} INFO - iteration 36, current learner my_lgbm\n",
- "[flaml.automl: 08-22 21:20:47] {1515} INFO - at 102.9s,\tbest my_lgbm's error=0.1669,\tbest my_lgbm's error=0.1669\n",
- "[flaml.automl: 08-22 21:20:47] {1358} INFO - iteration 37, current learner my_lgbm\n",
- "[flaml.automl: 08-22 21:20:55] {1515} INFO - at 111.2s,\tbest my_lgbm's error=0.1597,\tbest my_lgbm's error=0.1597\n",
- "[flaml.automl: 08-22 21:20:55] {1358} INFO - iteration 38, current learner my_lgbm\n",
- "[flaml.automl: 08-22 21:20:58] {1515} INFO - at 114.5s,\tbest my_lgbm's error=0.1597,\tbest my_lgbm's error=0.1597\n",
- "[flaml.automl: 08-22 21:20:58] {1358} INFO - iteration 39, current learner my_lgbm\n",
- "[flaml.automl: 08-22 21:21:37] {1515} INFO - at 153.5s,\tbest my_lgbm's error=0.1597,\tbest my_lgbm's error=0.1597\n",
- "[flaml.automl: 08-22 21:21:37] {1592} INFO - selected model: LGBMRegressor(colsample_bytree=0.8251774147208681,\n",
- " learning_rate=0.21049408131691624, max_bin=512,\n",
- " min_child_samples=19, n_estimators=196, num_leaves=195,\n",
-      "              objective=<function my_loss_obj at 0x...>,\n",
- " reg_alpha=0.0009765625, reg_lambda=0.0117923889609937,\n",
+ "[flaml.automl: 09-29 23:19:46] {1446} INFO - Data split method: uniform\n",
+ "[flaml.automl: 09-29 23:19:46] {1450} INFO - Evaluation method: cv\n",
+ "[flaml.automl: 09-29 23:19:46] {1496} INFO - Minimizing error metric: 1-r2\n",
+ "[flaml.automl: 09-29 23:19:46] {1533} INFO - List of ML learners in AutoML Run: ['my_lgbm']\n",
+ "[flaml.automl: 09-29 23:19:46] {1763} INFO - iteration 0, current learner my_lgbm\n",
+ "[flaml.automl: 09-29 23:19:46] {1880} INFO - Estimated sufficient time budget=1596s. Estimated necessary time budget=2s.\n",
+ "[flaml.automl: 09-29 23:19:46] {1952} INFO - at 0.2s,\testimator my_lgbm's best error=2.9883,\tbest estimator my_lgbm's best error=2.9883\n",
+ "[flaml.automl: 09-29 23:19:46] {1763} INFO - iteration 1, current learner my_lgbm\n",
+ "[flaml.automl: 09-29 23:19:47] {1952} INFO - at 0.4s,\testimator my_lgbm's best error=2.9883,\tbest estimator my_lgbm's best error=2.9883\n",
+ "[flaml.automl: 09-29 23:19:47] {1763} INFO - iteration 2, current learner my_lgbm\n",
+ "[flaml.automl: 09-29 23:19:47] {1952} INFO - at 0.5s,\testimator my_lgbm's best error=1.7086,\tbest estimator my_lgbm's best error=1.7086\n",
+ "[flaml.automl: 09-29 23:19:47] {1763} INFO - iteration 3, current learner my_lgbm\n",
+ "[flaml.automl: 09-29 23:19:47] {1952} INFO - at 0.7s,\testimator my_lgbm's best error=0.3474,\tbest estimator my_lgbm's best error=0.3474\n",
+ "[flaml.automl: 09-29 23:19:47] {1763} INFO - iteration 4, current learner my_lgbm\n",
+ "[flaml.automl: 09-29 23:19:47] {1952} INFO - at 0.9s,\testimator my_lgbm's best error=0.3474,\tbest estimator my_lgbm's best error=0.3474\n",
+ "[flaml.automl: 09-29 23:19:47] {1763} INFO - iteration 5, current learner my_lgbm\n",
+ "[flaml.automl: 09-29 23:19:47] {1952} INFO - at 1.1s,\testimator my_lgbm's best error=0.3015,\tbest estimator my_lgbm's best error=0.3015\n",
+ "[flaml.automl: 09-29 23:19:47] {1763} INFO - iteration 6, current learner my_lgbm\n",
+ "[flaml.automl: 09-29 23:19:48] {1952} INFO - at 1.3s,\testimator my_lgbm's best error=0.3015,\tbest estimator my_lgbm's best error=0.3015\n",
+ "[flaml.automl: 09-29 23:19:48] {1763} INFO - iteration 7, current learner my_lgbm\n",
+ "[flaml.automl: 09-29 23:19:48] {1952} INFO - at 1.5s,\testimator my_lgbm's best error=0.3015,\tbest estimator my_lgbm's best error=0.3015\n",
+ "[flaml.automl: 09-29 23:19:48] {1763} INFO - iteration 8, current learner my_lgbm\n",
+ "[flaml.automl: 09-29 23:19:48] {1952} INFO - at 1.8s,\testimator my_lgbm's best error=0.2721,\tbest estimator my_lgbm's best error=0.2721\n",
+ "[flaml.automl: 09-29 23:19:48] {1763} INFO - iteration 9, current learner my_lgbm\n",
+ "[flaml.automl: 09-29 23:19:48] {1952} INFO - at 2.0s,\testimator my_lgbm's best error=0.2721,\tbest estimator my_lgbm's best error=0.2721\n",
+ "[flaml.automl: 09-29 23:19:48] {1763} INFO - iteration 10, current learner my_lgbm\n",
+ "[flaml.automl: 09-29 23:19:49] {1952} INFO - at 3.2s,\testimator my_lgbm's best error=0.1833,\tbest estimator my_lgbm's best error=0.1833\n",
+ "[flaml.automl: 09-29 23:19:49] {1763} INFO - iteration 11, current learner my_lgbm\n",
+ "[flaml.automl: 09-29 23:19:51] {1952} INFO - at 4.7s,\testimator my_lgbm's best error=0.1833,\tbest estimator my_lgbm's best error=0.1833\n",
+ "[flaml.automl: 09-29 23:19:51] {1763} INFO - iteration 12, current learner my_lgbm\n",
+ "[flaml.automl: 09-29 23:19:52] {1952} INFO - at 5.5s,\testimator my_lgbm's best error=0.1833,\tbest estimator my_lgbm's best error=0.1833\n",
+ "[flaml.automl: 09-29 23:19:52] {1763} INFO - iteration 13, current learner my_lgbm\n",
+ "[flaml.automl: 09-29 23:19:53] {1952} INFO - at 6.8s,\testimator my_lgbm's best error=0.1833,\tbest estimator my_lgbm's best error=0.1833\n",
+ "[flaml.automl: 09-29 23:19:53] {1763} INFO - iteration 14, current learner my_lgbm\n",
+ "[flaml.automl: 09-29 23:19:54] {1952} INFO - at 8.1s,\testimator my_lgbm's best error=0.1833,\tbest estimator my_lgbm's best error=0.1833\n",
+ "[flaml.automl: 09-29 23:19:54] {1763} INFO - iteration 15, current learner my_lgbm\n",
+ "[flaml.automl: 09-29 23:19:56] {1952} INFO - at 9.7s,\testimator my_lgbm's best error=0.1762,\tbest estimator my_lgbm's best error=0.1762\n",
+ "[flaml.automl: 09-29 23:19:56] {1763} INFO - iteration 16, current learner my_lgbm\n",
+ "[flaml.automl: 09-29 23:19:57] {1952} INFO - at 10.8s,\testimator my_lgbm's best error=0.1762,\tbest estimator my_lgbm's best error=0.1762\n",
+ "[flaml.automl: 09-29 23:19:57] {1763} INFO - iteration 17, current learner my_lgbm\n",
+ "[flaml.automl: 09-29 23:20:04] {1952} INFO - at 17.7s,\testimator my_lgbm's best error=0.1760,\tbest estimator my_lgbm's best error=0.1760\n",
+ "[flaml.automl: 09-29 23:20:04] {1763} INFO - iteration 18, current learner my_lgbm\n",
+ "[flaml.automl: 09-29 23:20:06] {1952} INFO - at 19.8s,\testimator my_lgbm's best error=0.1760,\tbest estimator my_lgbm's best error=0.1760\n",
+ "[flaml.automl: 09-29 23:20:06] {1763} INFO - iteration 19, current learner my_lgbm\n",
+ "[flaml.automl: 09-29 23:20:08] {1952} INFO - at 22.2s,\testimator my_lgbm's best error=0.1760,\tbest estimator my_lgbm's best error=0.1760\n",
+ "[flaml.automl: 09-29 23:20:08] {1763} INFO - iteration 20, current learner my_lgbm\n",
+ "[flaml.automl: 09-29 23:20:22] {1952} INFO - at 35.5s,\testimator my_lgbm's best error=0.1760,\tbest estimator my_lgbm's best error=0.1760\n",
+ "[flaml.automl: 09-29 23:20:22] {1763} INFO - iteration 21, current learner my_lgbm\n",
+ "[flaml.automl: 09-29 23:20:28] {1952} INFO - at 41.7s,\testimator my_lgbm's best error=0.1760,\tbest estimator my_lgbm's best error=0.1760\n",
+ "[flaml.automl: 09-29 23:20:28] {1763} INFO - iteration 22, current learner my_lgbm\n",
+ "[flaml.automl: 09-29 23:20:32] {1952} INFO - at 45.6s,\testimator my_lgbm's best error=0.1706,\tbest estimator my_lgbm's best error=0.1706\n",
+ "[flaml.automl: 09-29 23:20:32] {1763} INFO - iteration 23, current learner my_lgbm\n",
+ "[flaml.automl: 09-29 23:20:35] {1952} INFO - at 49.0s,\testimator my_lgbm's best error=0.1706,\tbest estimator my_lgbm's best error=0.1706\n",
+ "[flaml.automl: 09-29 23:20:35] {1763} INFO - iteration 24, current learner my_lgbm\n",
+ "[flaml.automl: 09-29 23:20:42] {1952} INFO - at 55.4s,\testimator my_lgbm's best error=0.1706,\tbest estimator my_lgbm's best error=0.1706\n",
+ "[flaml.automl: 09-29 23:20:42] {1763} INFO - iteration 25, current learner my_lgbm\n",
+ "[flaml.automl: 09-29 23:20:58] {1952} INFO - at 71.6s,\testimator my_lgbm's best error=0.1706,\tbest estimator my_lgbm's best error=0.1706\n",
+ "[flaml.automl: 09-29 23:20:58] {1763} INFO - iteration 26, current learner my_lgbm\n",
+ "[flaml.automl: 09-29 23:20:59] {1952} INFO - at 72.9s,\testimator my_lgbm's best error=0.1706,\tbest estimator my_lgbm's best error=0.1706\n",
+ "[flaml.automl: 09-29 23:20:59] {1763} INFO - iteration 27, current learner my_lgbm\n",
+ "[flaml.automl: 09-29 23:21:11] {1952} INFO - at 84.3s,\testimator my_lgbm's best error=0.1706,\tbest estimator my_lgbm's best error=0.1706\n",
+ "[flaml.automl: 09-29 23:21:11] {1763} INFO - iteration 28, current learner my_lgbm\n",
+ "[flaml.automl: 09-29 23:21:13] {1952} INFO - at 87.0s,\testimator my_lgbm's best error=0.1706,\tbest estimator my_lgbm's best error=0.1706\n",
+ "[flaml.automl: 09-29 23:21:13] {1763} INFO - iteration 29, current learner my_lgbm\n",
+ "[flaml.automl: 09-29 23:21:14] {1952} INFO - at 88.2s,\testimator my_lgbm's best error=0.1706,\tbest estimator my_lgbm's best error=0.1706\n",
+ "[flaml.automl: 09-29 23:21:14] {1763} INFO - iteration 30, current learner my_lgbm\n",
+ "[flaml.automl: 09-29 23:21:39] {1952} INFO - at 112.9s,\testimator my_lgbm's best error=0.1706,\tbest estimator my_lgbm's best error=0.1706\n",
+ "[flaml.automl: 09-29 23:21:39] {1763} INFO - iteration 31, current learner my_lgbm\n",
+ "[flaml.automl: 09-29 23:21:42] {1952} INFO - at 115.6s,\testimator my_lgbm's best error=0.1706,\tbest estimator my_lgbm's best error=0.1706\n",
+ "[flaml.automl: 09-29 23:21:42] {1763} INFO - iteration 32, current learner my_lgbm\n",
+ "[flaml.automl: 09-29 23:21:48] {1952} INFO - at 122.0s,\testimator my_lgbm's best error=0.1632,\tbest estimator my_lgbm's best error=0.1632\n",
+ "[flaml.automl: 09-29 23:21:48] {1763} INFO - iteration 33, current learner my_lgbm\n",
+ "[flaml.automl: 09-29 23:21:54] {1952} INFO - at 127.4s,\testimator my_lgbm's best error=0.1632,\tbest estimator my_lgbm's best error=0.1632\n",
+ "[flaml.automl: 09-29 23:21:54] {1763} INFO - iteration 34, current learner my_lgbm\n",
+ "[flaml.automl: 09-29 23:21:58] {1952} INFO - at 131.3s,\testimator my_lgbm's best error=0.1632,\tbest estimator my_lgbm's best error=0.1632\n",
+ "[flaml.automl: 09-29 23:21:58] {1763} INFO - iteration 35, current learner my_lgbm\n",
+ "[flaml.automl: 09-29 23:22:18] {1952} INFO - at 152.2s,\testimator my_lgbm's best error=0.1632,\tbest estimator my_lgbm's best error=0.1632\n",
+ "[flaml.automl: 09-29 23:22:18] {2059} INFO - selected model: LGBMRegressor(colsample_bytree=0.8422311526890249,\n",
+ " learning_rate=0.4130805075333343, max_bin=1023,\n",
+ " min_child_samples=10, n_estimators=95, num_leaves=221,\n",
+      "              objective=<function my_loss_obj at 0x...>,\n",
+ " reg_alpha=0.007704104902643932, reg_lambda=0.003151767359549649,\n",
" verbose=-1)\n",
- "[flaml.automl: 08-22 21:21:39] {1633} INFO - retrain my_lgbm for 1.6s\n",
- "[flaml.automl: 08-22 21:21:39] {1636} INFO - retrained model: LGBMRegressor(colsample_bytree=0.8251774147208681,\n",
- " learning_rate=0.21049408131691624, max_bin=512,\n",
- " min_child_samples=19, n_estimators=196, num_leaves=195,\n",
- " objective=,\n",
- " reg_alpha=0.0009765625, reg_lambda=0.0117923889609937,\n",
+ "[flaml.automl: 09-29 23:22:20] {2122} INFO - retrain my_lgbm for 1.6s\n",
+ "[flaml.automl: 09-29 23:22:20] {2128} INFO - retrained model: LGBMRegressor(colsample_bytree=0.8422311526890249,\n",
+ " learning_rate=0.4130805075333343, max_bin=1023,\n",
+ " min_child_samples=10, n_estimators=95, num_leaves=221,\n",
+      "              objective=<function my_loss_obj at 0x...>,\n",
+ " reg_alpha=0.007704104902643932, reg_lambda=0.003151767359549649,\n",
" verbose=-1)\n",
- "[flaml.automl: 08-22 21:21:39] {1199} INFO - fit succeeded\n",
- "[flaml.automl: 08-22 21:21:39] {1200} INFO - Time taken to find the best model: 111.22549629211426\n",
- "[flaml.automl: 08-22 21:21:39] {1205} WARNING - Time taken to find the best model is 74% of the provided time budget and not all estimators' hyperparameter search converged. Consider increasing the time budget.\n"
+ "[flaml.automl: 09-29 23:22:20] {1557} INFO - fit succeeded\n",
+ "[flaml.automl: 09-29 23:22:20] {1558} INFO - Time taken to find the best model: 121.97463893890381\n",
+ "[flaml.automl: 09-29 23:22:20] {1569} WARNING - Time taken to find the best model is 81% of the provided time budget and not all estimators' hyperparameter search converged. Consider increasing the time budget.\n"
]
}
],
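For context, a log like the one above is produced by registering the custom estimator and restricting the search to it. A sketch of the surrounding calls, with the time budget inferred from the log (about 122 s reported as 81% of the budget suggests roughly 150 s); `MyLGBM`, `X_train`, and `y_train` come from earlier cells:

```python
from flaml import AutoML

automl = AutoML()
# Register the custom estimator under the name the log refers to.
automl.add_learner(learner_name="my_lgbm", learner_class=MyLGBM)
settings = {
    "time_budget": 150,             # inferred from the log, not verbatim
    "metric": "r2",                 # the run minimizes 1-r2
    "estimator_list": ["my_lgbm"],  # search only the custom learner
    "task": "regression",
}
automl.fit(X_train=X_train, y_train=y_train, **settings)
```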
@@ -979,11 +931,11 @@
"output_type": "stream",
"name": "stdout",
"text": [
- "Best hyperparmeter config: {'n_estimators': 196, 'num_leaves': 195, 'min_child_samples': 19, 'learning_rate': 0.21049408131691624, 'log_max_bin': 10, 'colsample_bytree': 0.8251774147208681, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.0117923889609937}\n",
- "Best r2 on validation data: 0.8403\n",
- "Training duration of best run: 8.28 s\n",
- "Predicted labels [137336.50894266 249721.8950541 155077.11127769 ... 191822.32898046\n",
- " 197332.92376977 286448.29599298]\n",
+ "Best hyperparmeter config: {'n_estimators': 95, 'num_leaves': 221, 'min_child_samples': 10, 'learning_rate': 0.4130805075333343, 'log_max_bin': 10, 'colsample_bytree': 0.8422311526890249, 'reg_alpha': 0.007704104902643932, 'reg_lambda': 0.003151767359549649}\n",
+ "Best r2 on validation data: 0.8368\n",
+ "Training duration of best run: 6.404 s\n",
+ "Predicted labels [161485.59767093 248585.87889042 157837.93378105 ... 184356.07034452\n",
+ " 223247.80995858 259281.61167122]\n",
"True labels 14740 136900.0\n",
"10101 241300.0\n",
"20566 200700.0\n",
@@ -996,9 +948,9 @@
"8522 227300.0\n",
"16798 265600.0\n",
"Name: median_house_value, Length: 5160, dtype: float64\n",
- "r2 = 0.8498843855121221\n",
- "mse = 1984304232.0760334\n",
- "mae = 29465.919207148785\n"
+ "r2 = 0.8429833151406843\n",
+ "mse = 2075526075.9236276\n",
+ "mae = 30102.910560642205\n"
]
}
],
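The r2/mse/mae figures above are ordinary regression metrics on held-out predictions. Computed with scikit-learn they look like the following; the toy arrays stand in for the notebook's labels and predictions:

```python
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score

# Toy stand-ins for the notebook's true labels and predicted values.
y_true = np.array([136900.0, 241300.0, 200700.0, 227300.0, 265600.0])
y_pred = np.array([161485.6, 248585.9, 157837.9, 223247.8, 259281.6])

print("r2 =", r2_score(y_true, y_pred))
print("mse =", mean_squared_error(y_true, y_pred))
print("mae =", mean_absolute_error(y_true, y_pred))
```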
@@ -1009,7 +961,7 @@
],
"metadata": {
"interpreter": {
- "hash": "ea9f131eb1b7663628f6445553ba215a834e2f0b4d18774746f0f47938ce4671"
+ "hash": "0cfea3304185a9579d09e0953576b57c8581e46e6ebc6dfeb681bc5a511f7544"
},
"kernelspec": {
"name": "python3",
diff --git a/notebook/flaml_xgboost.ipynb b/notebook/flaml_xgboost.ipynb
index 7257bea0a..0cfd0f6e8 100644
--- a/notebook/flaml_xgboost.ipynb
+++ b/notebook/flaml_xgboost.ipynb
@@ -3,7 +3,7 @@
{
"cell_type": "markdown",
"source": [
- "Copyright (c) 2020-2021 Microsoft Corporation. All rights reserved. \n",
+ "Copyright (c) Microsoft Corporation. All rights reserved. \n",
"\n",
"Licensed under the MIT License.\n",
"\n",
@@ -140,70 +140,71 @@
"output_type": "stream",
"name": "stderr",
"text": [
- "[flaml.automl: 08-22 21:23:40] {1130} INFO - Evaluation method: cv\n",
- "[flaml.automl: 08-22 21:23:40] {634} INFO - Using RepeatedKFold\n",
- "[flaml.automl: 08-22 21:23:40] {1155} INFO - Minimizing error metric: 1-r2\n",
- "[flaml.automl: 08-22 21:23:40] {1175} INFO - List of ML learners in AutoML Run: ['xgboost']\n",
- "[flaml.automl: 08-22 21:23:40] {1358} INFO - iteration 0, current learner xgboost\n",
- "[flaml.automl: 08-22 21:23:41] {1515} INFO - at 0.4s,\tbest xgboost's error=2.1267,\tbest xgboost's error=2.1267\n",
- "[flaml.automl: 08-22 21:23:41] {1358} INFO - iteration 1, current learner xgboost\n",
- "[flaml.automl: 08-22 21:23:41] {1515} INFO - at 0.6s,\tbest xgboost's error=2.1267,\tbest xgboost's error=2.1267\n",
- "[flaml.automl: 08-22 21:23:41] {1358} INFO - iteration 2, current learner xgboost\n",
- "[flaml.automl: 08-22 21:23:41] {1515} INFO - at 0.7s,\tbest xgboost's error=0.8485,\tbest xgboost's error=0.8485\n",
- "[flaml.automl: 08-22 21:23:41] {1358} INFO - iteration 3, current learner xgboost\n",
- "[flaml.automl: 08-22 21:23:41] {1515} INFO - at 0.9s,\tbest xgboost's error=0.3799,\tbest xgboost's error=0.3799\n",
- "[flaml.automl: 08-22 21:23:41] {1358} INFO - iteration 4, current learner xgboost\n",
- "[flaml.automl: 08-22 21:23:41] {1515} INFO - at 1.0s,\tbest xgboost's error=0.3799,\tbest xgboost's error=0.3799\n",
- "[flaml.automl: 08-22 21:23:41] {1358} INFO - iteration 5, current learner xgboost\n",
- "[flaml.automl: 08-22 21:23:41] {1515} INFO - at 1.2s,\tbest xgboost's error=0.3799,\tbest xgboost's error=0.3799\n",
- "[flaml.automl: 08-22 21:23:41] {1358} INFO - iteration 6, current learner xgboost\n",
- "[flaml.automl: 08-22 21:23:41] {1515} INFO - at 1.4s,\tbest xgboost's error=0.2992,\tbest xgboost's error=0.2992\n",
- "[flaml.automl: 08-22 21:23:41] {1358} INFO - iteration 7, current learner xgboost\n",
- "[flaml.automl: 08-22 21:23:42] {1515} INFO - at 1.6s,\tbest xgboost's error=0.2992,\tbest xgboost's error=0.2992\n",
- "[flaml.automl: 08-22 21:23:42] {1358} INFO - iteration 8, current learner xgboost\n",
- "[flaml.automl: 08-22 21:23:42] {1515} INFO - at 1.8s,\tbest xgboost's error=0.2992,\tbest xgboost's error=0.2992\n",
- "[flaml.automl: 08-22 21:23:42] {1358} INFO - iteration 9, current learner xgboost\n",
- "[flaml.automl: 08-22 21:23:42] {1515} INFO - at 2.0s,\tbest xgboost's error=0.2513,\tbest xgboost's error=0.2513\n",
- "[flaml.automl: 08-22 21:23:42] {1358} INFO - iteration 10, current learner xgboost\n",
- "[flaml.automl: 08-22 21:23:42] {1515} INFO - at 2.2s,\tbest xgboost's error=0.2513,\tbest xgboost's error=0.2513\n",
- "[flaml.automl: 08-22 21:23:42] {1358} INFO - iteration 11, current learner xgboost\n",
- "[flaml.automl: 08-22 21:23:43] {1515} INFO - at 2.4s,\tbest xgboost's error=0.2513,\tbest xgboost's error=0.2513\n",
- "[flaml.automl: 08-22 21:23:43] {1358} INFO - iteration 12, current learner xgboost\n",
- "[flaml.automl: 08-22 21:23:43] {1515} INFO - at 2.6s,\tbest xgboost's error=0.2113,\tbest xgboost's error=0.2113\n",
- "[flaml.automl: 08-22 21:23:43] {1358} INFO - iteration 13, current learner xgboost\n",
- "[flaml.automl: 08-22 21:23:43] {1515} INFO - at 2.8s,\tbest xgboost's error=0.2113,\tbest xgboost's error=0.2113\n",
- "[flaml.automl: 08-22 21:23:43] {1358} INFO - iteration 14, current learner xgboost\n",
- "[flaml.automl: 08-22 21:23:43] {1515} INFO - at 3.2s,\tbest xgboost's error=0.2090,\tbest xgboost's error=0.2090\n",
- "[flaml.automl: 08-22 21:23:43] {1358} INFO - iteration 15, current learner xgboost\n",
- "[flaml.automl: 08-22 21:23:44] {1515} INFO - at 3.6s,\tbest xgboost's error=0.2090,\tbest xgboost's error=0.2090\n",
- "[flaml.automl: 08-22 21:23:44] {1358} INFO - iteration 16, current learner xgboost\n",
- "[flaml.automl: 08-22 21:23:44] {1515} INFO - at 4.1s,\tbest xgboost's error=0.1919,\tbest xgboost's error=0.1919\n",
- "[flaml.automl: 08-22 21:23:44] {1358} INFO - iteration 17, current learner xgboost\n",
- "[flaml.automl: 08-22 21:23:45] {1515} INFO - at 4.4s,\tbest xgboost's error=0.1919,\tbest xgboost's error=0.1919\n",
- "[flaml.automl: 08-22 21:23:45] {1358} INFO - iteration 18, current learner xgboost\n",
- "[flaml.automl: 08-22 21:23:47] {1515} INFO - at 6.9s,\tbest xgboost's error=0.1797,\tbest xgboost's error=0.1797\n",
- "[flaml.automl: 08-22 21:23:47] {1358} INFO - iteration 19, current learner xgboost\n",
- "[flaml.automl: 08-22 21:23:48] {1515} INFO - at 7.9s,\tbest xgboost's error=0.1797,\tbest xgboost's error=0.1797\n",
- "[flaml.automl: 08-22 21:23:48] {1358} INFO - iteration 20, current learner xgboost\n",
- "[flaml.automl: 08-22 21:24:00] {1515} INFO - at 20.2s,\tbest xgboost's error=0.1797,\tbest xgboost's error=0.1797\n",
- "[flaml.automl: 08-22 21:24:00] {1358} INFO - iteration 21, current learner xgboost\n",
- "[flaml.automl: 08-22 21:24:02] {1515} INFO - at 21.9s,\tbest xgboost's error=0.1797,\tbest xgboost's error=0.1797\n",
- "[flaml.automl: 08-22 21:24:02] {1358} INFO - iteration 22, current learner xgboost\n",
- "[flaml.automl: 08-22 21:24:06] {1515} INFO - at 25.8s,\tbest xgboost's error=0.1782,\tbest xgboost's error=0.1782\n",
- "[flaml.automl: 08-22 21:24:06] {1358} INFO - iteration 23, current learner xgboost\n",
- "[flaml.automl: 08-22 21:24:08] {1515} INFO - at 28.3s,\tbest xgboost's error=0.1782,\tbest xgboost's error=0.1782\n",
- "[flaml.automl: 08-22 21:24:08] {1358} INFO - iteration 24, current learner xgboost\n",
- "[flaml.automl: 08-22 21:24:15] {1515} INFO - at 34.7s,\tbest xgboost's error=0.1782,\tbest xgboost's error=0.1782\n",
- "[flaml.automl: 08-22 21:24:15] {1358} INFO - iteration 25, current learner xgboost\n",
- "[flaml.automl: 08-22 21:24:16] {1515} INFO - at 35.5s,\tbest xgboost's error=0.1782,\tbest xgboost's error=0.1782\n",
- "[flaml.automl: 08-22 21:24:16] {1358} INFO - iteration 26, current learner xgboost\n",
- "[flaml.automl: 08-22 21:24:39] {1515} INFO - at 58.5s,\tbest xgboost's error=0.1660,\tbest xgboost's error=0.1660\n",
-      "[flaml.automl: 08-22 21:24:39] {1592} INFO - selected model: <xgboost.core.Booster object at 0x...>\n",
-      "[flaml.automl: 08-22 21:24:43] {1633} INFO - retrain xgboost for 4.4s\n",
-      "[flaml.automl: 08-22 21:24:43] {1636} INFO - retrained model: <xgboost.core.Booster object at 0x...>\n",
- "[flaml.automl: 08-22 21:24:43] {1199} INFO - fit succeeded\n",
- "[flaml.automl: 08-22 21:24:43] {1200} INFO - Time taken to find the best model: 58.49340343475342\n",
- "[flaml.automl: 08-22 21:24:43] {1205} WARNING - Time taken to find the best model is 97% of the provided time budget and not all estimators' hyperparameter search converged. Consider increasing the time budget.\n"
+ "[flaml.automl: 09-29 23:06:46] {1446} INFO - Data split method: uniform\n",
+ "[flaml.automl: 09-29 23:06:46] {1450} INFO - Evaluation method: cv\n",
+ "[flaml.automl: 09-29 23:06:46] {1496} INFO - Minimizing error metric: 1-r2\n",
+ "[flaml.automl: 09-29 23:06:46] {1533} INFO - List of ML learners in AutoML Run: ['xgboost']\n",
+ "[flaml.automl: 09-29 23:06:46] {1763} INFO - iteration 0, current learner xgboost\n",
+ "[flaml.automl: 09-29 23:06:47] {1880} INFO - Estimated sufficient time budget=2621s. Estimated necessary time budget=3s.\n",
+ "[flaml.automl: 09-29 23:06:47] {1952} INFO - at 0.3s,\testimator xgboost's best error=2.1267,\tbest estimator xgboost's best error=2.1267\n",
+ "[flaml.automl: 09-29 23:06:47] {1763} INFO - iteration 1, current learner xgboost\n",
+ "[flaml.automl: 09-29 23:06:47] {1952} INFO - at 0.5s,\testimator xgboost's best error=2.1267,\tbest estimator xgboost's best error=2.1267\n",
+ "[flaml.automl: 09-29 23:06:47] {1763} INFO - iteration 2, current learner xgboost\n",
+ "[flaml.automl: 09-29 23:06:47] {1952} INFO - at 0.6s,\testimator xgboost's best error=0.8485,\tbest estimator xgboost's best error=0.8485\n",
+ "[flaml.automl: 09-29 23:06:47] {1763} INFO - iteration 3, current learner xgboost\n",
+ "[flaml.automl: 09-29 23:06:47] {1952} INFO - at 0.8s,\testimator xgboost's best error=0.3799,\tbest estimator xgboost's best error=0.3799\n",
+ "[flaml.automl: 09-29 23:06:47] {1763} INFO - iteration 4, current learner xgboost\n",
+ "[flaml.automl: 09-29 23:06:47] {1952} INFO - at 1.0s,\testimator xgboost's best error=0.3799,\tbest estimator xgboost's best error=0.3799\n",
+ "[flaml.automl: 09-29 23:06:47] {1763} INFO - iteration 5, current learner xgboost\n",
+ "[flaml.automl: 09-29 23:06:47] {1952} INFO - at 1.2s,\testimator xgboost's best error=0.3799,\tbest estimator xgboost's best error=0.3799\n",
+ "[flaml.automl: 09-29 23:06:47] {1763} INFO - iteration 6, current learner xgboost\n",
+ "[flaml.automl: 09-29 23:06:48] {1952} INFO - at 1.5s,\testimator xgboost's best error=0.2992,\tbest estimator xgboost's best error=0.2992\n",
+ "[flaml.automl: 09-29 23:06:48] {1763} INFO - iteration 7, current learner xgboost\n",
+ "[flaml.automl: 09-29 23:06:48] {1952} INFO - at 1.9s,\testimator xgboost's best error=0.2992,\tbest estimator xgboost's best error=0.2992\n",
+ "[flaml.automl: 09-29 23:06:48] {1763} INFO - iteration 8, current learner xgboost\n",
+ "[flaml.automl: 09-29 23:06:49] {1952} INFO - at 2.2s,\testimator xgboost's best error=0.2992,\tbest estimator xgboost's best error=0.2992\n",
+ "[flaml.automl: 09-29 23:06:49] {1763} INFO - iteration 9, current learner xgboost\n",
+ "[flaml.automl: 09-29 23:06:49] {1952} INFO - at 2.5s,\testimator xgboost's best error=0.2513,\tbest estimator xgboost's best error=0.2513\n",
+ "[flaml.automl: 09-29 23:06:49] {1763} INFO - iteration 10, current learner xgboost\n",
+ "[flaml.automl: 09-29 23:06:49] {1952} INFO - at 2.8s,\testimator xgboost's best error=0.2513,\tbest estimator xgboost's best error=0.2513\n",
+ "[flaml.automl: 09-29 23:06:49] {1763} INFO - iteration 11, current learner xgboost\n",
+ "[flaml.automl: 09-29 23:06:49] {1952} INFO - at 3.0s,\testimator xgboost's best error=0.2513,\tbest estimator xgboost's best error=0.2513\n",
+ "[flaml.automl: 09-29 23:06:49] {1763} INFO - iteration 12, current learner xgboost\n",
+ "[flaml.automl: 09-29 23:06:50] {1952} INFO - at 3.3s,\testimator xgboost's best error=0.2113,\tbest estimator xgboost's best error=0.2113\n",
+ "[flaml.automl: 09-29 23:06:50] {1763} INFO - iteration 13, current learner xgboost\n",
+ "[flaml.automl: 09-29 23:06:50] {1952} INFO - at 3.5s,\testimator xgboost's best error=0.2113,\tbest estimator xgboost's best error=0.2113\n",
+ "[flaml.automl: 09-29 23:06:50] {1763} INFO - iteration 14, current learner xgboost\n",
+ "[flaml.automl: 09-29 23:06:50] {1952} INFO - at 4.0s,\testimator xgboost's best error=0.2090,\tbest estimator xgboost's best error=0.2090\n",
+ "[flaml.automl: 09-29 23:06:50] {1763} INFO - iteration 15, current learner xgboost\n",
+ "[flaml.automl: 09-29 23:06:51] {1952} INFO - at 4.5s,\testimator xgboost's best error=0.2090,\tbest estimator xgboost's best error=0.2090\n",
+ "[flaml.automl: 09-29 23:06:51] {1763} INFO - iteration 16, current learner xgboost\n",
+ "[flaml.automl: 09-29 23:06:51] {1952} INFO - at 5.2s,\testimator xgboost's best error=0.1919,\tbest estimator xgboost's best error=0.1919\n",
+ "[flaml.automl: 09-29 23:06:51] {1763} INFO - iteration 17, current learner xgboost\n",
+ "[flaml.automl: 09-29 23:06:52] {1952} INFO - at 5.5s,\testimator xgboost's best error=0.1919,\tbest estimator xgboost's best error=0.1919\n",
+ "[flaml.automl: 09-29 23:06:52] {1763} INFO - iteration 18, current learner xgboost\n",
+ "[flaml.automl: 09-29 23:06:54] {1952} INFO - at 8.0s,\testimator xgboost's best error=0.1797,\tbest estimator xgboost's best error=0.1797\n",
+ "[flaml.automl: 09-29 23:06:54] {1763} INFO - iteration 19, current learner xgboost\n",
+ "[flaml.automl: 09-29 23:06:55] {1952} INFO - at 9.0s,\testimator xgboost's best error=0.1797,\tbest estimator xgboost's best error=0.1797\n",
+ "[flaml.automl: 09-29 23:06:55] {1763} INFO - iteration 20, current learner xgboost\n",
+ "[flaml.automl: 09-29 23:07:08] {1952} INFO - at 21.8s,\testimator xgboost's best error=0.1797,\tbest estimator xgboost's best error=0.1797\n",
+ "[flaml.automl: 09-29 23:07:08] {1763} INFO - iteration 21, current learner xgboost\n",
+ "[flaml.automl: 09-29 23:07:11] {1952} INFO - at 24.4s,\testimator xgboost's best error=0.1797,\tbest estimator xgboost's best error=0.1797\n",
+ "[flaml.automl: 09-29 23:07:11] {1763} INFO - iteration 22, current learner xgboost\n",
+ "[flaml.automl: 09-29 23:07:16] {1952} INFO - at 30.0s,\testimator xgboost's best error=0.1782,\tbest estimator xgboost's best error=0.1782\n",
+ "[flaml.automl: 09-29 23:07:16] {1763} INFO - iteration 23, current learner xgboost\n",
+ "[flaml.automl: 09-29 23:07:20] {1952} INFO - at 33.5s,\testimator xgboost's best error=0.1782,\tbest estimator xgboost's best error=0.1782\n",
+ "[flaml.automl: 09-29 23:07:20] {1763} INFO - iteration 24, current learner xgboost\n",
+ "[flaml.automl: 09-29 23:07:29] {1952} INFO - at 42.3s,\testimator xgboost's best error=0.1782,\tbest estimator xgboost's best error=0.1782\n",
+ "[flaml.automl: 09-29 23:07:29] {1763} INFO - iteration 25, current learner xgboost\n",
+ "[flaml.automl: 09-29 23:07:30] {1952} INFO - at 43.2s,\testimator xgboost's best error=0.1782,\tbest estimator xgboost's best error=0.1782\n",
+ "[flaml.automl: 09-29 23:07:30] {1763} INFO - iteration 26, current learner xgboost\n",
+ "[flaml.automl: 09-29 23:07:50] {1952} INFO - at 63.4s,\testimator xgboost's best error=0.1663,\tbest estimator xgboost's best error=0.1663\n",
+      "[flaml.automl: 09-29 23:07:50] {2059} INFO - selected model: <xgboost.core.Booster object at 0x...>\n",
+      "[flaml.automl: 09-29 23:07:55] {2122} INFO - retrain xgboost for 5.4s\n",
+      "[flaml.automl: 09-29 23:07:55] {2128} INFO - retrained model: <xgboost.core.Booster object at 0x...>\n",
+ "[flaml.automl: 09-29 23:07:55] {1557} INFO - fit succeeded\n",
+ "[flaml.automl: 09-29 23:07:55] {1558} INFO - Time taken to find the best model: 63.427649974823\n",
+ "[flaml.automl: 09-29 23:07:55] {1569} WARNING - Time taken to find the best model is 106% of the provided time budget and not all estimators' hyperparameter search converged. Consider increasing the time budget.\n"
]
}
],
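For reference, logs like the above come from an AutoML fit restricted to the single 'xgboost' learner. Below is a sketch with settings inferred from the log (63.4 s reported as 106% of the budget implies a 60 s time budget) rather than copied from the notebook; the log file name is likewise an assumption:

```python
from flaml import AutoML
from sklearn.datasets import fetch_california_housing

X_train, y_train = fetch_california_housing(return_X_y=True)
automl = AutoML()
settings = {
    "time_budget": 60,              # inferred: 63.4s reported as 106%
    "metric": "r2",                 # the run minimizes 1-r2
    "estimator_list": ["xgboost"],  # single-learner search
    "task": "regression",
    "log_file_name": "xgboost.log", # assumed name, reused for plotting below
}
automl.fit(X_train=X_train, y_train=y_train, **settings)
```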
@@ -240,8 +241,8 @@
"name": "stdout",
"text": [
"Best hyperparmeter config: {'n_estimators': 776, 'max_leaves': 160, 'min_child_weight': 32.57408640781376, 'learning_rate': 0.03478685333241491, 'subsample': 0.9152991332236934, 'colsample_bylevel': 0.5656764254642628, 'colsample_bytree': 0.7313266091895249, 'reg_alpha': 0.005771390107656191, 'reg_lambda': 1.4912667278658753}\n",
- "Best r2 on validation data: 0.834\n",
- "Training duration of best run: 23 s\n"
+ "Best r2 on validation data: 0.8337\n",
+ "Training duration of best run: 20.25 s\n"
]
}
],
@@ -263,7 +264,7 @@
"output_type": "execute_result",
"data": {
"text/plain": [
- ""
+ ""
]
},
"metadata": {},
@@ -382,8 +383,7 @@
"{'Current Learner': 'xgboost', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 58, 'max_leaves': 8, 'min_child_weight': 51.84874392377363, 'learning_rate': 0.23511987355535005, 'subsample': 1.0, 'colsample_bylevel': 0.8182737361783602, 'colsample_bytree': 0.8031986460435498, 'reg_alpha': 0.00400039941928546, 'reg_lambda': 0.3870252968100477}, 'Best Learner': 'xgboost', 'Best Hyper-parameters': {'n_estimators': 58, 'max_leaves': 8, 'min_child_weight': 51.84874392377363, 'learning_rate': 0.23511987355535005, 'subsample': 1.0, 'colsample_bylevel': 0.8182737361783602, 'colsample_bytree': 0.8031986460435498, 'reg_alpha': 0.00400039941928546, 'reg_lambda': 0.3870252968100477}}\n",
"{'Current Learner': 'xgboost', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 101, 'max_leaves': 14, 'min_child_weight': 7.444058088783045, 'learning_rate': 0.39220715578198356, 'subsample': 1.0, 'colsample_bylevel': 0.6274332478496758, 'colsample_bytree': 0.7190251742957809, 'reg_alpha': 0.007212902167942765, 'reg_lambda': 0.20172056689658158}, 'Best Learner': 'xgboost', 'Best Hyper-parameters': {'n_estimators': 101, 'max_leaves': 14, 'min_child_weight': 7.444058088783045, 'learning_rate': 0.39220715578198356, 'subsample': 1.0, 'colsample_bylevel': 0.6274332478496758, 'colsample_bytree': 0.7190251742957809, 'reg_alpha': 0.007212902167942765, 'reg_lambda': 0.20172056689658158}}\n",
"{'Current Learner': 'xgboost', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 205, 'max_leaves': 30, 'min_child_weight': 5.450621032615104, 'learning_rate': 0.12229148765139466, 'subsample': 0.8895588746662894, 'colsample_bylevel': 0.47518959001130784, 'colsample_bytree': 0.6845612830806885, 'reg_alpha': 0.01126059820390593, 'reg_lambda': 0.08170816686602438}, 'Best Learner': 'xgboost', 'Best Hyper-parameters': {'n_estimators': 205, 'max_leaves': 30, 'min_child_weight': 5.450621032615104, 'learning_rate': 0.12229148765139466, 'subsample': 0.8895588746662894, 'colsample_bylevel': 0.47518959001130784, 'colsample_bytree': 0.6845612830806885, 'reg_alpha': 0.01126059820390593, 'reg_lambda': 0.08170816686602438}}\n",
- "{'Current Learner': 'xgboost', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 222, 'max_leaves': 62, 'min_child_weight': 7.5054716192185795, 'learning_rate': 0.04623175582706431, 'subsample': 0.8756054034199897, 'colsample_bylevel': 0.44768367042684304, 'colsample_bytree': 0.7352307811741962, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.6207832675443758}, 'Best Learner': 'xgboost', 'Best Hyper-parameters': {'n_estimators': 222, 'max_leaves': 62, 'min_child_weight': 7.5054716192185795, 'learning_rate': 0.04623175582706431, 'subsample': 0.8756054034199897, 'colsample_bylevel': 0.44768367042684304, 'colsample_bytree': 0.7352307811741962, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.6207832675443758}}\n",
- "{'Current Learner': 'xgboost', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 776, 'max_leaves': 160, 'min_child_weight': 32.57408640781376, 'learning_rate': 0.03478685333241491, 'subsample': 0.9152991332236934, 'colsample_bylevel': 0.5656764254642628, 'colsample_bytree': 0.7313266091895249, 'reg_alpha': 0.005771390107656191, 'reg_lambda': 1.4912667278658753}, 'Best Learner': 'xgboost', 'Best Hyper-parameters': {'n_estimators': 776, 'max_leaves': 160, 'min_child_weight': 32.57408640781376, 'learning_rate': 0.03478685333241491, 'subsample': 0.9152991332236934, 'colsample_bylevel': 0.5656764254642628, 'colsample_bytree': 0.7313266091895249, 'reg_alpha': 0.005771390107656191, 'reg_lambda': 1.4912667278658753}}\n"
+ "{'Current Learner': 'xgboost', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 222, 'max_leaves': 62, 'min_child_weight': 7.5054716192185795, 'learning_rate': 0.04623175582706431, 'subsample': 0.8756054034199897, 'colsample_bylevel': 0.44768367042684304, 'colsample_bytree': 0.7352307811741962, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.6207832675443758}, 'Best Learner': 'xgboost', 'Best Hyper-parameters': {'n_estimators': 222, 'max_leaves': 62, 'min_child_weight': 7.5054716192185795, 'learning_rate': 0.04623175582706431, 'subsample': 0.8756054034199897, 'colsample_bylevel': 0.44768367042684304, 'colsample_bytree': 0.7352307811741962, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.6207832675443758}}\n"
]
}
],
@@ -412,11 +412,10 @@
{
"output_type": "display_data",
"data": {
+      "image/png": "<base64-encoded PNG plot data omitted for readability>",
"text/plain": [
""
- ],
-      "image/svg+xml": "<inline SVG plot markup omitted for readability>",
-      "image/png": "<base64-encoded PNG plot data omitted for readability>"
ewO3/2ADwAfIRuO50PAW4FjyO7Ynw6cRja8yEdS09edkq6LiOdycYwHns7dKf5x4JyIuDgN7dOV3JYDbyoQp/UTPgOx/uZWsuTRlUBuy83fkuocruzpeMvIBvs7qNyGeuCtwFUR8VxE/Bm4Evi7tK8rIuJJgJJhIr4E7BERHy+YPAAeiIhlEbEVWEH2cKAgG/plXKrzLmC2suHjbwJ2JRu6Im8U2XDfXW4DviDp88C+EbExxb8F2NQ1VpQNPE4g1t909YMcTPYL+XayM5A3A7dK2hX4NnBcRBxM1s6/a43bXg2MlbR7r0ednTG8sfSspJvyY0ttzc1v5a+tDQLen+tHGRsR+ScHAmwk928SEZeQncVsBBZJOiJXdxfgLz2I2VqYE4j1N7eSNUmtT8/ZWA8MJUsit/LXL8YnlT1XpOLVT6Ui4nmy0YDPSU05XaPpfqCk6m+A6ZJ2SyO4HpvKbhoqEvwAAAEFSURBVAA+kAbioyRZ/BKYC/yizr/oFwOf7ur3kTS5TJ0/8Nczlq4BBddExLfIRmh9XSrfE3gyIl6sY7zWxJxArL9ZRnb11e0lZc9ExJPpiqXvk52dLCb75d8dXyRr3lkpaTnZI1tLH2r1W7Ln0d9JNlLweRGxJCJWAGcCN0v6HfCNkvWuSLEtVDa0ez18FRgE3CNpRZrfTuoPuV/SfqnoeGB5avZ6LXBRKj8c+EWd4rQW4NF4zewlJB0LvDEivlilzpXA7Ij4Q99FZs3EV2GZ2UtExFVdTW3lpCa8BU4eA5vPQMzMrBD3gZiZWSFOIGZmVogTiJmZFeIEYmZmhTiBmJlZIf8fKcylHHIb3RIAAAAASUVORK5CYII="
+ ]
},
"metadata": {
"needs_background": "light"
@@ -552,16 +551,16 @@
" '''XGBoostEstimator with the logregobj function as the objective function\n",
" '''\n",
"\n",
- " def __init__(self, **params):\n",
- " super().__init__(objective=logregobj, **params) \n",
+ " def __init__(self, **config):\n",
+ " super().__init__(objective=logregobj, **config) \n",
"\n",
"\n",
"class MyXGB2(XGBoostEstimator):\n",
" '''XGBoostEstimator with 'reg:squarederror' as the objective function\n",
" '''\n",
"\n",
- " def __init__(self, **params):\n",
- " super().__init__(objective='reg:gamma', **params)\n",
+ " def __init__(self, **config):\n",
+ " super().__init__(objective='reg:gamma', **config)\n",
"\n",
"\n",
"from flaml import AutoML\n",
@@ -582,244 +581,195 @@
"output_type": "stream",
"name": "stderr",
"text": [
- "[flaml.automl: 08-22 21:24:46] {1130} INFO - Evaluation method: holdout\n",
- "[flaml.automl: 08-22 21:24:46] {634} INFO - Using RepeatedKFold\n",
- "[flaml.automl: 08-22 21:24:46] {1155} INFO - Minimizing error metric: 1-r2\n",
- "[flaml.automl: 08-22 21:24:46] {1175} INFO - List of ML learners in AutoML Run: ['my_xgb1', 'my_xgb2']\n",
- "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 0, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.1s,\tbest my_xgb1's error=53750617.1059,\tbest my_xgb1's error=53750617.1059\n",
- "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 1, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.1s,\tbest my_xgb1's error=260718.5183,\tbest my_xgb1's error=260718.5183\n",
- "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 2, current learner my_xgb2\n",
- "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.1s,\tbest my_xgb2's error=4.1611,\tbest my_xgb2's error=4.1611\n",
- "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 3, current learner my_xgb2\n",
- "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.2s,\tbest my_xgb2's error=4.1611,\tbest my_xgb2's error=4.1611\n",
- "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 4, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.2s,\tbest my_xgb1's error=260718.5183,\tbest my_xgb2's error=4.1611\n",
- "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 5, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.2s,\tbest my_xgb1's error=260718.5183,\tbest my_xgb2's error=4.1611\n",
- "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 6, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.3s,\tbest my_xgb1's error=40726.5769,\tbest my_xgb2's error=4.1611\n",
- "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 7, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.3s,\tbest my_xgb1's error=1918.9637,\tbest my_xgb2's error=4.1611\n",
- "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 8, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.3s,\tbest my_xgb1's error=1918.9637,\tbest my_xgb2's error=4.1611\n",
- "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 9, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.3s,\tbest my_xgb1's error=1918.9637,\tbest my_xgb2's error=4.1611\n",
- "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 10, current learner my_xgb2\n",
- "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.4s,\tbest my_xgb2's error=4.1611,\tbest my_xgb2's error=4.1611\n",
- "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 11, current learner my_xgb2\n",
- "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.4s,\tbest my_xgb2's error=4.1603,\tbest my_xgb2's error=4.1603\n",
- "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 12, current learner my_xgb2\n",
- "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.5s,\tbest my_xgb2's error=4.1603,\tbest my_xgb2's error=4.1603\n",
- "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 13, current learner my_xgb2\n",
- "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.5s,\tbest my_xgb2's error=4.1603,\tbest my_xgb2's error=4.1603\n",
- "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 14, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.5s,\tbest my_xgb1's error=1918.9637,\tbest my_xgb2's error=4.1603\n",
- "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 15, current learner my_xgb2\n",
- "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.6s,\tbest my_xgb2's error=3.8476,\tbest my_xgb2's error=3.8476\n",
- "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 16, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.6s,\tbest my_xgb1's error=93.9115,\tbest my_xgb2's error=3.8476\n",
- "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 17, current learner my_xgb2\n",
- "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.7s,\tbest my_xgb2's error=0.3645,\tbest my_xgb2's error=0.3645\n",
- "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 18, current learner my_xgb2\n",
- "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.7s,\tbest my_xgb2's error=0.3645,\tbest my_xgb2's error=0.3645\n",
- "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 19, current learner my_xgb2\n",
- "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.8s,\tbest my_xgb2's error=0.3139,\tbest my_xgb2's error=0.3139\n",
- "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 20, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.8s,\tbest my_xgb1's error=93.9115,\tbest my_xgb2's error=0.3139\n",
- "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 21, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:24:47] {1515} INFO - at 0.8s,\tbest my_xgb1's error=12.3445,\tbest my_xgb2's error=0.3139\n",
- "[flaml.automl: 08-22 21:24:47] {1358} INFO - iteration 22, current learner my_xgb2\n",
- "[flaml.automl: 08-22 21:24:47] {1515} INFO - at 0.9s,\tbest my_xgb2's error=0.3139,\tbest my_xgb2's error=0.3139\n",
- "[flaml.automl: 08-22 21:24:47] {1358} INFO - iteration 23, current learner my_xgb2\n",
- "[flaml.automl: 08-22 21:24:47] {1515} INFO - at 1.0s,\tbest my_xgb2's error=0.3139,\tbest my_xgb2's error=0.3139\n",
- "[flaml.automl: 08-22 21:24:47] {1358} INFO - iteration 24, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:24:47] {1515} INFO - at 1.0s,\tbest my_xgb1's error=12.3445,\tbest my_xgb2's error=0.3139\n",
- "[flaml.automl: 08-22 21:24:47] {1358} INFO - iteration 25, current learner my_xgb2\n",
- "[flaml.automl: 08-22 21:24:47] {1515} INFO - at 1.1s,\tbest my_xgb2's error=0.2254,\tbest my_xgb2's error=0.2254\n",
- "[flaml.automl: 08-22 21:24:47] {1358} INFO - iteration 26, current learner my_xgb2\n",
- "[flaml.automl: 08-22 21:24:47] {1515} INFO - at 1.1s,\tbest my_xgb2's error=0.2254,\tbest my_xgb2's error=0.2254\n",
- "[flaml.automl: 08-22 21:24:47] {1358} INFO - iteration 27, current learner my_xgb2\n",
- "[flaml.automl: 08-22 21:24:47] {1515} INFO - at 1.3s,\tbest my_xgb2's error=0.2254,\tbest my_xgb2's error=0.2254\n",
- "[flaml.automl: 08-22 21:24:47] {1358} INFO - iteration 28, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:24:47] {1515} INFO - at 1.3s,\tbest my_xgb1's error=12.3445,\tbest my_xgb2's error=0.2254\n",
- "[flaml.automl: 08-22 21:24:47] {1358} INFO - iteration 29, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:24:47] {1515} INFO - at 1.3s,\tbest my_xgb1's error=4.1558,\tbest my_xgb2's error=0.2254\n",
- "[flaml.automl: 08-22 21:24:47] {1358} INFO - iteration 30, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:24:47] {1515} INFO - at 1.4s,\tbest my_xgb1's error=2.4948,\tbest my_xgb2's error=0.2254\n",
- "[flaml.automl: 08-22 21:24:47] {1358} INFO - iteration 31, current learner my_xgb2\n",
- "[flaml.automl: 08-22 21:24:47] {1515} INFO - at 1.4s,\tbest my_xgb2's error=0.2254,\tbest my_xgb2's error=0.2254\n",
- "[flaml.automl: 08-22 21:24:47] {1358} INFO - iteration 32, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:24:47] {1515} INFO - at 1.5s,\tbest my_xgb1's error=2.4948,\tbest my_xgb2's error=0.2254\n",
- "[flaml.automl: 08-22 21:24:47] {1358} INFO - iteration 33, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:24:47] {1515} INFO - at 1.5s,\tbest my_xgb1's error=2.4948,\tbest my_xgb2's error=0.2254\n",
- "[flaml.automl: 08-22 21:24:47] {1358} INFO - iteration 34, current learner my_xgb2\n",
- "[flaml.automl: 08-22 21:24:47] {1515} INFO - at 1.6s,\tbest my_xgb2's error=0.2254,\tbest my_xgb2's error=0.2254\n",
- "[flaml.automl: 08-22 21:24:47] {1358} INFO - iteration 35, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:24:47] {1515} INFO - at 1.7s,\tbest my_xgb1's error=1.4151,\tbest my_xgb2's error=0.2254\n",
- "[flaml.automl: 08-22 21:24:47] {1358} INFO - iteration 36, current learner my_xgb2\n",
- "[flaml.automl: 08-22 21:24:47] {1515} INFO - at 1.7s,\tbest my_xgb2's error=0.2254,\tbest my_xgb2's error=0.2254\n",
- "[flaml.automl: 08-22 21:24:47] {1358} INFO - iteration 37, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:24:47] {1515} INFO - at 1.7s,\tbest my_xgb1's error=1.4151,\tbest my_xgb2's error=0.2254\n",
- "[flaml.automl: 08-22 21:24:47] {1358} INFO - iteration 38, current learner my_xgb2\n",
- "[flaml.automl: 08-22 21:24:48] {1515} INFO - at 2.0s,\tbest my_xgb2's error=0.2254,\tbest my_xgb2's error=0.2254\n",
- "[flaml.automl: 08-22 21:24:48] {1358} INFO - iteration 39, current learner my_xgb2\n",
- "[flaml.automl: 08-22 21:24:48] {1515} INFO - at 2.0s,\tbest my_xgb2's error=0.2254,\tbest my_xgb2's error=0.2254\n",
- "[flaml.automl: 08-22 21:24:48] {1358} INFO - iteration 40, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:24:48] {1515} INFO - at 2.1s,\tbest my_xgb1's error=1.4151,\tbest my_xgb2's error=0.2254\n",
- "[flaml.automl: 08-22 21:24:48] {1358} INFO - iteration 41, current learner my_xgb2\n",
- "[flaml.automl: 08-22 21:24:48] {1515} INFO - at 2.4s,\tbest my_xgb2's error=0.1900,\tbest my_xgb2's error=0.1900\n",
- "[flaml.automl: 08-22 21:24:48] {1358} INFO - iteration 42, current learner my_xgb2\n",
- "[flaml.automl: 08-22 21:24:48] {1515} INFO - at 2.6s,\tbest my_xgb2's error=0.1900,\tbest my_xgb2's error=0.1900\n",
- "[flaml.automl: 08-22 21:24:48] {1358} INFO - iteration 43, current learner my_xgb2\n",
- "[flaml.automl: 08-22 21:24:48] {1515} INFO - at 2.8s,\tbest my_xgb2's error=0.1900,\tbest my_xgb2's error=0.1900\n",
- "[flaml.automl: 08-22 21:24:48] {1358} INFO - iteration 44, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:24:49] {1515} INFO - at 2.9s,\tbest my_xgb1's error=1.4151,\tbest my_xgb2's error=0.1900\n",
- "[flaml.automl: 08-22 21:24:49] {1358} INFO - iteration 45, current learner my_xgb2\n",
- "[flaml.automl: 08-22 21:24:49] {1515} INFO - at 2.9s,\tbest my_xgb2's error=0.1900,\tbest my_xgb2's error=0.1900\n",
- "[flaml.automl: 08-22 21:24:49] {1358} INFO - iteration 46, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:24:49] {1515} INFO - at 3.0s,\tbest my_xgb1's error=1.4151,\tbest my_xgb2's error=0.1900\n",
- "[flaml.automl: 08-22 21:24:49] {1358} INFO - iteration 47, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:24:49] {1515} INFO - at 3.0s,\tbest my_xgb1's error=1.4151,\tbest my_xgb2's error=0.1900\n",
- "[flaml.automl: 08-22 21:24:49] {1358} INFO - iteration 48, current learner my_xgb2\n",
- "[flaml.automl: 08-22 21:24:49] {1515} INFO - at 3.7s,\tbest my_xgb2's error=0.1900,\tbest my_xgb2's error=0.1900\n",
- "[flaml.automl: 08-22 21:24:49] {1358} INFO - iteration 49, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:24:49] {1515} INFO - at 3.8s,\tbest my_xgb1's error=1.4151,\tbest my_xgb2's error=0.1900\n",
- "[flaml.automl: 08-22 21:24:49] {1358} INFO - iteration 50, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:24:50] {1515} INFO - at 3.8s,\tbest my_xgb1's error=1.4151,\tbest my_xgb2's error=0.1900\n",
- "[flaml.automl: 08-22 21:24:50] {1358} INFO - iteration 51, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:24:50] {1515} INFO - at 3.9s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1900\n",
- "[flaml.automl: 08-22 21:24:50] {1358} INFO - iteration 52, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:24:50] {1515} INFO - at 4.0s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1900\n",
- "[flaml.automl: 08-22 21:24:50] {1358} INFO - iteration 53, current learner my_xgb2\n",
- "[flaml.automl: 08-22 21:24:50] {1515} INFO - at 4.1s,\tbest my_xgb2's error=0.1900,\tbest my_xgb2's error=0.1900\n",
- "[flaml.automl: 08-22 21:24:50] {1358} INFO - iteration 54, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:24:50] {1515} INFO - at 4.2s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1900\n",
- "[flaml.automl: 08-22 21:24:50] {1358} INFO - iteration 55, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:24:50] {1515} INFO - at 4.3s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1900\n",
- "[flaml.automl: 08-22 21:24:50] {1358} INFO - iteration 56, current learner my_xgb2\n",
- "[flaml.automl: 08-22 21:24:51] {1515} INFO - at 5.5s,\tbest my_xgb2's error=0.1865,\tbest my_xgb2's error=0.1865\n",
- "[flaml.automl: 08-22 21:24:51] {1358} INFO - iteration 57, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:24:52] {1515} INFO - at 5.8s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1865\n",
- "[flaml.automl: 08-22 21:24:52] {1358} INFO - iteration 58, current learner my_xgb2\n",
- "[flaml.automl: 08-22 21:24:52] {1515} INFO - at 6.3s,\tbest my_xgb2's error=0.1790,\tbest my_xgb2's error=0.1790\n",
- "[flaml.automl: 08-22 21:24:52] {1358} INFO - iteration 59, current learner my_xgb2\n",
- "[flaml.automl: 08-22 21:24:53] {1515} INFO - at 7.3s,\tbest my_xgb2's error=0.1790,\tbest my_xgb2's error=0.1790\n",
- "[flaml.automl: 08-22 21:24:53] {1358} INFO - iteration 60, current learner my_xgb2\n",
- "[flaml.automl: 08-22 21:24:53] {1515} INFO - at 7.4s,\tbest my_xgb2's error=0.1790,\tbest my_xgb2's error=0.1790\n",
- "[flaml.automl: 08-22 21:24:53] {1358} INFO - iteration 61, current learner my_xgb2\n",
- "[flaml.automl: 08-22 21:24:56] {1515} INFO - at 10.6s,\tbest my_xgb2's error=0.1707,\tbest my_xgb2's error=0.1707\n",
- "[flaml.automl: 08-22 21:24:56] {1358} INFO - iteration 62, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:24:56] {1515} INFO - at 10.7s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1707\n",
- "[flaml.automl: 08-22 21:24:56] {1358} INFO - iteration 63, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:24:57] {1515} INFO - at 10.8s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1707\n",
- "[flaml.automl: 08-22 21:24:57] {1358} INFO - iteration 64, current learner my_xgb2\n",
- "[flaml.automl: 08-22 21:24:58] {1515} INFO - at 12.3s,\tbest my_xgb2's error=0.1707,\tbest my_xgb2's error=0.1707\n",
- "[flaml.automl: 08-22 21:24:58] {1358} INFO - iteration 65, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:24:58] {1515} INFO - at 12.5s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1707\n",
- "[flaml.automl: 08-22 21:24:58] {1358} INFO - iteration 66, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:24:58] {1515} INFO - at 12.5s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1707\n",
- "[flaml.automl: 08-22 21:24:58] {1358} INFO - iteration 67, current learner my_xgb2\n",
- "[flaml.automl: 08-22 21:25:02] {1515} INFO - at 16.2s,\tbest my_xgb2's error=0.1707,\tbest my_xgb2's error=0.1707\n",
- "[flaml.automl: 08-22 21:25:02] {1358} INFO - iteration 68, current learner my_xgb2\n",
- "[flaml.automl: 08-22 21:25:06] {1515} INFO - at 20.1s,\tbest my_xgb2's error=0.1699,\tbest my_xgb2's error=0.1699\n",
- "[flaml.automl: 08-22 21:25:06] {1358} INFO - iteration 69, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:06] {1515} INFO - at 20.5s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1699\n",
- "[flaml.automl: 08-22 21:25:06] {1358} INFO - iteration 70, current learner my_xgb2\n",
- "[flaml.automl: 08-22 21:25:11] {1515} INFO - at 25.4s,\tbest my_xgb2's error=0.1685,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:11] {1358} INFO - iteration 71, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:11] {1515} INFO - at 25.4s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:11] {1358} INFO - iteration 72, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:11] {1515} INFO - at 25.5s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:11] {1358} INFO - iteration 73, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:11] {1515} INFO - at 25.6s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:11] {1358} INFO - iteration 74, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:11] {1515} INFO - at 25.8s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:11] {1358} INFO - iteration 75, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:12] {1515} INFO - at 25.8s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:12] {1358} INFO - iteration 76, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:12] {1515} INFO - at 25.9s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:12] {1358} INFO - iteration 77, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:12] {1515} INFO - at 26.1s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:12] {1358} INFO - iteration 78, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:12] {1515} INFO - at 26.2s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:12] {1358} INFO - iteration 79, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:12] {1515} INFO - at 26.2s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:12] {1358} INFO - iteration 80, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:12] {1515} INFO - at 26.4s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:12] {1358} INFO - iteration 81, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:12] {1515} INFO - at 26.5s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:12] {1358} INFO - iteration 82, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:12] {1515} INFO - at 26.5s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:12] {1358} INFO - iteration 83, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:12] {1515} INFO - at 26.7s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:12] {1358} INFO - iteration 84, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:13] {1515} INFO - at 26.9s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:13] {1358} INFO - iteration 85, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:13] {1515} INFO - at 27.0s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:13] {1358} INFO - iteration 86, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:13] {1515} INFO - at 27.1s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:13] {1358} INFO - iteration 87, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:13] {1515} INFO - at 27.2s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:13] {1358} INFO - iteration 88, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:13] {1515} INFO - at 27.3s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:13] {1358} INFO - iteration 89, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:13] {1515} INFO - at 27.4s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:13] {1358} INFO - iteration 90, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:13] {1515} INFO - at 27.5s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:13] {1358} INFO - iteration 91, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:13] {1515} INFO - at 27.6s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:13] {1358} INFO - iteration 92, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:13] {1515} INFO - at 27.7s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:13] {1358} INFO - iteration 93, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:13] {1515} INFO - at 27.8s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:13] {1358} INFO - iteration 94, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:14] {1515} INFO - at 27.9s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:14] {1358} INFO - iteration 95, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:14] {1515} INFO - at 28.0s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:14] {1358} INFO - iteration 96, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:14] {1515} INFO - at 28.2s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:14] {1358} INFO - iteration 97, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:14] {1515} INFO - at 28.3s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:14] {1358} INFO - iteration 98, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:14] {1515} INFO - at 28.4s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:14] {1358} INFO - iteration 99, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:14] {1515} INFO - at 28.5s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:14] {1358} INFO - iteration 100, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:14] {1515} INFO - at 28.5s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:14] {1358} INFO - iteration 101, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:14] {1515} INFO - at 28.8s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:14] {1358} INFO - iteration 102, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:15] {1515} INFO - at 28.8s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:15] {1358} INFO - iteration 103, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:15] {1515} INFO - at 28.9s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:15] {1358} INFO - iteration 104, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:15] {1515} INFO - at 29.1s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:15] {1358} INFO - iteration 105, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:15] {1515} INFO - at 29.1s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:15] {1358} INFO - iteration 106, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:15] {1515} INFO - at 29.3s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:15] {1358} INFO - iteration 107, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:15] {1515} INFO - at 29.3s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:15] {1358} INFO - iteration 108, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:15] {1515} INFO - at 29.4s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:15] {1358} INFO - iteration 109, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:15] {1515} INFO - at 29.5s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:15] {1358} INFO - iteration 110, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:15] {1515} INFO - at 29.6s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:15] {1358} INFO - iteration 111, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:15] {1515} INFO - at 29.7s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:15] {1358} INFO - iteration 112, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:16] {1515} INFO - at 29.9s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:16] {1358} INFO - iteration 113, current learner my_xgb1\n",
- "[flaml.automl: 08-22 21:25:16] {1515} INFO - at 29.9s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n",
- "[flaml.automl: 08-22 21:25:16] {1592} INFO - selected model: \n",
- "[flaml.automl: 08-22 21:25:20] {1633} INFO - retrain my_xgb2 for 4.5s\n",
- "[flaml.automl: 08-22 21:25:20] {1636} INFO - retrained model: \n",
- "[flaml.automl: 08-22 21:25:20] {1199} INFO - fit succeeded\n",
- "[flaml.automl: 08-22 21:25:20] {1200} INFO - Time taken to find the best model: 25.375203132629395\n",
- "[flaml.automl: 08-22 21:25:20] {1205} WARNING - Time taken to find the best model is 85% of the provided time budget and not all estimators' hyperparameter search converged. Consider increasing the time budget.\n"
+ "[flaml.automl: 09-29 23:08:00] {1446} INFO - Data split method: uniform\n",
+ "[flaml.automl: 09-29 23:08:00] {1450} INFO - Evaluation method: holdout\n",
+ "[flaml.automl: 09-29 23:08:00] {1496} INFO - Minimizing error metric: 1-r2\n",
+ "[flaml.automl: 09-29 23:08:00] {1533} INFO - List of ML learners in AutoML Run: ['my_xgb1', 'my_xgb2']\n",
+ "[flaml.automl: 09-29 23:08:00] {1763} INFO - iteration 0, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:00] {1880} INFO - Estimated sufficient time budget=443s. Estimated necessary time budget=0s.\n",
+ "[flaml.automl: 09-29 23:08:00] {1952} INFO - at 0.1s,\testimator my_xgb1's best error=53750617.1059,\tbest estimator my_xgb1's best error=53750617.1059\n",
+ "[flaml.automl: 09-29 23:08:00] {1763} INFO - iteration 1, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:00] {1952} INFO - at 0.1s,\testimator my_xgb1's best error=260718.5183,\tbest estimator my_xgb1's best error=260718.5183\n",
+ "[flaml.automl: 09-29 23:08:00] {1763} INFO - iteration 2, current learner my_xgb2\n",
+ "[flaml.automl: 09-29 23:08:00] {1952} INFO - at 0.2s,\testimator my_xgb2's best error=4.1611,\tbest estimator my_xgb2's best error=4.1611\n",
+ "[flaml.automl: 09-29 23:08:00] {1763} INFO - iteration 3, current learner my_xgb2\n",
+ "[flaml.automl: 09-29 23:08:00] {1952} INFO - at 0.2s,\testimator my_xgb2's best error=4.1611,\tbest estimator my_xgb2's best error=4.1611\n",
+ "[flaml.automl: 09-29 23:08:00] {1763} INFO - iteration 4, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:00] {1952} INFO - at 0.3s,\testimator my_xgb1's best error=260718.5183,\tbest estimator my_xgb2's best error=4.1611\n",
+ "[flaml.automl: 09-29 23:08:00] {1763} INFO - iteration 5, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:00] {1952} INFO - at 0.3s,\testimator my_xgb1's best error=260718.5183,\tbest estimator my_xgb2's best error=4.1611\n",
+ "[flaml.automl: 09-29 23:08:00] {1763} INFO - iteration 6, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:00] {1952} INFO - at 0.4s,\testimator my_xgb1's best error=40726.5769,\tbest estimator my_xgb2's best error=4.1611\n",
+ "[flaml.automl: 09-29 23:08:00] {1763} INFO - iteration 7, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:00] {1952} INFO - at 0.4s,\testimator my_xgb1's best error=1918.9637,\tbest estimator my_xgb2's best error=4.1611\n",
+ "[flaml.automl: 09-29 23:08:00] {1763} INFO - iteration 8, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:00] {1952} INFO - at 0.5s,\testimator my_xgb1's best error=1918.9637,\tbest estimator my_xgb2's best error=4.1611\n",
+ "[flaml.automl: 09-29 23:08:00] {1763} INFO - iteration 9, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:00] {1952} INFO - at 0.5s,\testimator my_xgb1's best error=1918.9637,\tbest estimator my_xgb2's best error=4.1611\n",
+ "[flaml.automl: 09-29 23:08:00] {1763} INFO - iteration 10, current learner my_xgb2\n",
+ "[flaml.automl: 09-29 23:08:00] {1952} INFO - at 0.6s,\testimator my_xgb2's best error=4.1611,\tbest estimator my_xgb2's best error=4.1611\n",
+ "[flaml.automl: 09-29 23:08:00] {1763} INFO - iteration 11, current learner my_xgb2\n",
+ "[flaml.automl: 09-29 23:08:00] {1952} INFO - at 0.6s,\testimator my_xgb2's best error=4.1603,\tbest estimator my_xgb2's best error=4.1603\n",
+ "[flaml.automl: 09-29 23:08:00] {1763} INFO - iteration 12, current learner my_xgb2\n",
+ "[flaml.automl: 09-29 23:08:00] {1952} INFO - at 0.7s,\testimator my_xgb2's best error=4.1603,\tbest estimator my_xgb2's best error=4.1603\n",
+ "[flaml.automl: 09-29 23:08:00] {1763} INFO - iteration 13, current learner my_xgb2\n",
+ "[flaml.automl: 09-29 23:08:00] {1952} INFO - at 0.7s,\testimator my_xgb2's best error=4.1603,\tbest estimator my_xgb2's best error=4.1603\n",
+ "[flaml.automl: 09-29 23:08:00] {1763} INFO - iteration 14, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:01] {1952} INFO - at 0.8s,\testimator my_xgb1's best error=1918.9637,\tbest estimator my_xgb2's best error=4.1603\n",
+ "[flaml.automl: 09-29 23:08:01] {1763} INFO - iteration 15, current learner my_xgb2\n",
+ "[flaml.automl: 09-29 23:08:01] {1952} INFO - at 0.8s,\testimator my_xgb2's best error=3.8476,\tbest estimator my_xgb2's best error=3.8476\n",
+ "[flaml.automl: 09-29 23:08:01] {1763} INFO - iteration 16, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:01] {1952} INFO - at 0.9s,\testimator my_xgb1's best error=93.9115,\tbest estimator my_xgb2's best error=3.8476\n",
+ "[flaml.automl: 09-29 23:08:01] {1763} INFO - iteration 17, current learner my_xgb2\n",
+ "[flaml.automl: 09-29 23:08:01] {1952} INFO - at 1.0s,\testimator my_xgb2's best error=0.3645,\tbest estimator my_xgb2's best error=0.3645\n",
+ "[flaml.automl: 09-29 23:08:01] {1763} INFO - iteration 18, current learner my_xgb2\n",
+ "[flaml.automl: 09-29 23:08:01] {1952} INFO - at 1.1s,\testimator my_xgb2's best error=0.3645,\tbest estimator my_xgb2's best error=0.3645\n",
+ "[flaml.automl: 09-29 23:08:01] {1763} INFO - iteration 19, current learner my_xgb2\n",
+ "[flaml.automl: 09-29 23:08:01] {1952} INFO - at 1.2s,\testimator my_xgb2's best error=0.3139,\tbest estimator my_xgb2's best error=0.3139\n",
+ "[flaml.automl: 09-29 23:08:01] {1763} INFO - iteration 20, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:01] {1952} INFO - at 1.2s,\testimator my_xgb1's best error=93.9115,\tbest estimator my_xgb2's best error=0.3139\n",
+ "[flaml.automl: 09-29 23:08:01] {1763} INFO - iteration 21, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:01] {1952} INFO - at 1.3s,\testimator my_xgb1's best error=12.3445,\tbest estimator my_xgb2's best error=0.3139\n",
+ "[flaml.automl: 09-29 23:08:01] {1763} INFO - iteration 22, current learner my_xgb2\n",
+ "[flaml.automl: 09-29 23:08:01] {1952} INFO - at 1.4s,\testimator my_xgb2's best error=0.3139,\tbest estimator my_xgb2's best error=0.3139\n",
+ "[flaml.automl: 09-29 23:08:01] {1763} INFO - iteration 23, current learner my_xgb2\n",
+ "[flaml.automl: 09-29 23:08:01] {1952} INFO - at 1.4s,\testimator my_xgb2's best error=0.3139,\tbest estimator my_xgb2's best error=0.3139\n",
+ "[flaml.automl: 09-29 23:08:01] {1763} INFO - iteration 24, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:01] {1952} INFO - at 1.5s,\testimator my_xgb1's best error=12.3445,\tbest estimator my_xgb2's best error=0.3139\n",
+ "[flaml.automl: 09-29 23:08:01] {1763} INFO - iteration 25, current learner my_xgb2\n",
+ "[flaml.automl: 09-29 23:08:01] {1952} INFO - at 1.6s,\testimator my_xgb2's best error=0.2254,\tbest estimator my_xgb2's best error=0.2254\n",
+ "[flaml.automl: 09-29 23:08:01] {1763} INFO - iteration 26, current learner my_xgb2\n",
+ "[flaml.automl: 09-29 23:08:01] {1952} INFO - at 1.7s,\testimator my_xgb2's best error=0.2254,\tbest estimator my_xgb2's best error=0.2254\n",
+ "[flaml.automl: 09-29 23:08:01] {1763} INFO - iteration 27, current learner my_xgb2\n",
+ "[flaml.automl: 09-29 23:08:02] {1952} INFO - at 1.9s,\testimator my_xgb2's best error=0.2254,\tbest estimator my_xgb2's best error=0.2254\n",
+ "[flaml.automl: 09-29 23:08:02] {1763} INFO - iteration 28, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:02] {1952} INFO - at 1.9s,\testimator my_xgb1's best error=12.3445,\tbest estimator my_xgb2's best error=0.2254\n",
+ "[flaml.automl: 09-29 23:08:02] {1763} INFO - iteration 29, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:02] {1952} INFO - at 2.0s,\testimator my_xgb1's best error=4.1558,\tbest estimator my_xgb2's best error=0.2254\n",
+ "[flaml.automl: 09-29 23:08:02] {1763} INFO - iteration 30, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:02] {1952} INFO - at 2.0s,\testimator my_xgb1's best error=2.4948,\tbest estimator my_xgb2's best error=0.2254\n",
+ "[flaml.automl: 09-29 23:08:02] {1763} INFO - iteration 31, current learner my_xgb2\n",
+ "[flaml.automl: 09-29 23:08:02] {1952} INFO - at 2.2s,\testimator my_xgb2's best error=0.2254,\tbest estimator my_xgb2's best error=0.2254\n",
+ "[flaml.automl: 09-29 23:08:02] {1763} INFO - iteration 32, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:02] {1952} INFO - at 2.2s,\testimator my_xgb1's best error=2.4948,\tbest estimator my_xgb2's best error=0.2254\n",
+ "[flaml.automl: 09-29 23:08:02] {1763} INFO - iteration 33, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:02] {1952} INFO - at 2.3s,\testimator my_xgb1's best error=2.4948,\tbest estimator my_xgb2's best error=0.2254\n",
+ "[flaml.automl: 09-29 23:08:02] {1763} INFO - iteration 34, current learner my_xgb2\n",
+ "[flaml.automl: 09-29 23:08:02] {1952} INFO - at 2.5s,\testimator my_xgb2's best error=0.2254,\tbest estimator my_xgb2's best error=0.2254\n",
+ "[flaml.automl: 09-29 23:08:02] {1763} INFO - iteration 35, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:02] {1952} INFO - at 2.6s,\testimator my_xgb1's best error=1.4151,\tbest estimator my_xgb2's best error=0.2254\n",
+ "[flaml.automl: 09-29 23:08:02] {1763} INFO - iteration 36, current learner my_xgb2\n",
+ "[flaml.automl: 09-29 23:08:02] {1952} INFO - at 2.6s,\testimator my_xgb2's best error=0.2254,\tbest estimator my_xgb2's best error=0.2254\n",
+ "[flaml.automl: 09-29 23:08:02] {1763} INFO - iteration 37, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:02] {1952} INFO - at 2.7s,\testimator my_xgb1's best error=1.4151,\tbest estimator my_xgb2's best error=0.2254\n",
+ "[flaml.automl: 09-29 23:08:02] {1763} INFO - iteration 38, current learner my_xgb2\n",
+ "[flaml.automl: 09-29 23:08:03] {1952} INFO - at 3.0s,\testimator my_xgb2's best error=0.2254,\tbest estimator my_xgb2's best error=0.2254\n",
+ "[flaml.automl: 09-29 23:08:03] {1763} INFO - iteration 39, current learner my_xgb2\n",
+ "[flaml.automl: 09-29 23:08:03] {1952} INFO - at 3.1s,\testimator my_xgb2's best error=0.2254,\tbest estimator my_xgb2's best error=0.2254\n",
+ "[flaml.automl: 09-29 23:08:03] {1763} INFO - iteration 40, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:03] {1952} INFO - at 3.1s,\testimator my_xgb1's best error=1.4151,\tbest estimator my_xgb2's best error=0.2254\n",
+ "[flaml.automl: 09-29 23:08:03] {1763} INFO - iteration 41, current learner my_xgb2\n",
+ "[flaml.automl: 09-29 23:08:03] {1952} INFO - at 3.6s,\testimator my_xgb2's best error=0.1900,\tbest estimator my_xgb2's best error=0.1900\n",
+ "[flaml.automl: 09-29 23:08:03] {1763} INFO - iteration 42, current learner my_xgb2\n",
+ "[flaml.automl: 09-29 23:08:04] {1952} INFO - at 4.0s,\testimator my_xgb2's best error=0.1900,\tbest estimator my_xgb2's best error=0.1900\n",
+ "[flaml.automl: 09-29 23:08:04] {1763} INFO - iteration 43, current learner my_xgb2\n",
+ "[flaml.automl: 09-29 23:08:04] {1952} INFO - at 4.2s,\testimator my_xgb2's best error=0.1900,\tbest estimator my_xgb2's best error=0.1900\n",
+ "[flaml.automl: 09-29 23:08:04] {1763} INFO - iteration 44, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:04] {1952} INFO - at 4.3s,\testimator my_xgb1's best error=1.4151,\tbest estimator my_xgb2's best error=0.1900\n",
+ "[flaml.automl: 09-29 23:08:04] {1763} INFO - iteration 45, current learner my_xgb2\n",
+ "[flaml.automl: 09-29 23:08:04] {1952} INFO - at 4.3s,\testimator my_xgb2's best error=0.1900,\tbest estimator my_xgb2's best error=0.1900\n",
+ "[flaml.automl: 09-29 23:08:04] {1763} INFO - iteration 46, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:04] {1952} INFO - at 4.4s,\testimator my_xgb1's best error=1.4151,\tbest estimator my_xgb2's best error=0.1900\n",
+ "[flaml.automl: 09-29 23:08:04] {1763} INFO - iteration 47, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:04] {1952} INFO - at 4.4s,\testimator my_xgb1's best error=1.4151,\tbest estimator my_xgb2's best error=0.1900\n",
+ "[flaml.automl: 09-29 23:08:04] {1763} INFO - iteration 48, current learner my_xgb2\n",
+ "[flaml.automl: 09-29 23:08:05] {1952} INFO - at 5.2s,\testimator my_xgb2's best error=0.1900,\tbest estimator my_xgb2's best error=0.1900\n",
+ "[flaml.automl: 09-29 23:08:05] {1763} INFO - iteration 49, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:05] {1952} INFO - at 5.3s,\testimator my_xgb1's best error=1.4151,\tbest estimator my_xgb2's best error=0.1900\n",
+ "[flaml.automl: 09-29 23:08:05] {1763} INFO - iteration 50, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:05] {1952} INFO - at 5.3s,\testimator my_xgb1's best error=1.4151,\tbest estimator my_xgb2's best error=0.1900\n",
+ "[flaml.automl: 09-29 23:08:05] {1763} INFO - iteration 51, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:05] {1952} INFO - at 5.4s,\testimator my_xgb1's best error=1.0011,\tbest estimator my_xgb2's best error=0.1900\n",
+ "[flaml.automl: 09-29 23:08:05] {1763} INFO - iteration 52, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:05] {1952} INFO - at 5.5s,\testimator my_xgb1's best error=1.0011,\tbest estimator my_xgb2's best error=0.1900\n",
+ "[flaml.automl: 09-29 23:08:05] {1763} INFO - iteration 53, current learner my_xgb2\n",
+ "[flaml.automl: 09-29 23:08:05] {1952} INFO - at 5.6s,\testimator my_xgb2's best error=0.1900,\tbest estimator my_xgb2's best error=0.1900\n",
+ "[flaml.automl: 09-29 23:08:05] {1763} INFO - iteration 54, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:05] {1952} INFO - at 5.7s,\testimator my_xgb1's best error=1.0011,\tbest estimator my_xgb2's best error=0.1900\n",
+ "[flaml.automl: 09-29 23:08:05] {1763} INFO - iteration 55, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:05] {1952} INFO - at 5.7s,\testimator my_xgb1's best error=1.0011,\tbest estimator my_xgb2's best error=0.1900\n",
+ "[flaml.automl: 09-29 23:08:05] {1763} INFO - iteration 56, current learner my_xgb2\n",
+ "[flaml.automl: 09-29 23:08:07] {1952} INFO - at 7.1s,\testimator my_xgb2's best error=0.1865,\tbest estimator my_xgb2's best error=0.1865\n",
+ "[flaml.automl: 09-29 23:08:07] {1763} INFO - iteration 57, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:07] {1952} INFO - at 7.4s,\testimator my_xgb1's best error=1.0011,\tbest estimator my_xgb2's best error=0.1865\n",
+ "[flaml.automl: 09-29 23:08:07] {1763} INFO - iteration 58, current learner my_xgb2\n",
+ "[flaml.automl: 09-29 23:08:08] {1952} INFO - at 7.9s,\testimator my_xgb2's best error=0.1790,\tbest estimator my_xgb2's best error=0.1790\n",
+ "[flaml.automl: 09-29 23:08:08] {1763} INFO - iteration 59, current learner my_xgb2\n",
+ "[flaml.automl: 09-29 23:08:09] {1952} INFO - at 9.1s,\testimator my_xgb2's best error=0.1790,\tbest estimator my_xgb2's best error=0.1790\n",
+ "[flaml.automl: 09-29 23:08:09] {1763} INFO - iteration 60, current learner my_xgb2\n",
+ "[flaml.automl: 09-29 23:08:09] {1952} INFO - at 9.2s,\testimator my_xgb2's best error=0.1790,\tbest estimator my_xgb2's best error=0.1790\n",
+ "[flaml.automl: 09-29 23:08:09] {1763} INFO - iteration 61, current learner my_xgb2\n",
+ "[flaml.automl: 09-29 23:08:13] {1952} INFO - at 12.8s,\testimator my_xgb2's best error=0.1707,\tbest estimator my_xgb2's best error=0.1707\n",
+ "[flaml.automl: 09-29 23:08:13] {1763} INFO - iteration 62, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:13] {1952} INFO - at 12.9s,\testimator my_xgb1's best error=1.0011,\tbest estimator my_xgb2's best error=0.1707\n",
+ "[flaml.automl: 09-29 23:08:13] {1763} INFO - iteration 63, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:13] {1952} INFO - at 13.0s,\testimator my_xgb1's best error=1.0011,\tbest estimator my_xgb2's best error=0.1707\n",
+ "[flaml.automl: 09-29 23:08:13] {1763} INFO - iteration 64, current learner my_xgb2\n",
+ "[flaml.automl: 09-29 23:08:14] {1952} INFO - at 14.5s,\testimator my_xgb2's best error=0.1707,\tbest estimator my_xgb2's best error=0.1707\n",
+ "[flaml.automl: 09-29 23:08:14] {1763} INFO - iteration 65, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:14] {1952} INFO - at 14.7s,\testimator my_xgb1's best error=1.0011,\tbest estimator my_xgb2's best error=0.1707\n",
+ "[flaml.automl: 09-29 23:08:14] {1763} INFO - iteration 66, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:14] {1952} INFO - at 14.7s,\testimator my_xgb1's best error=1.0011,\tbest estimator my_xgb2's best error=0.1707\n",
+ "[flaml.automl: 09-29 23:08:14] {1763} INFO - iteration 67, current learner my_xgb2\n",
+ "[flaml.automl: 09-29 23:08:18] {1952} INFO - at 18.5s,\testimator my_xgb2's best error=0.1707,\tbest estimator my_xgb2's best error=0.1707\n",
+ "[flaml.automl: 09-29 23:08:18] {1763} INFO - iteration 68, current learner my_xgb2\n",
+ "[flaml.automl: 09-29 23:08:22] {1952} INFO - at 22.7s,\testimator my_xgb2's best error=0.1699,\tbest estimator my_xgb2's best error=0.1699\n",
+ "[flaml.automl: 09-29 23:08:22] {1763} INFO - iteration 69, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:23] {1952} INFO - at 23.0s,\testimator my_xgb1's best error=1.0011,\tbest estimator my_xgb2's best error=0.1699\n",
+ "[flaml.automl: 09-29 23:08:23] {1763} INFO - iteration 70, current learner my_xgb2\n",
+ "[flaml.automl: 09-29 23:08:28] {1952} INFO - at 28.1s,\testimator my_xgb2's best error=0.1685,\tbest estimator my_xgb2's best error=0.1685\n",
+ "[flaml.automl: 09-29 23:08:28] {1763} INFO - iteration 71, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:28] {1952} INFO - at 28.1s,\testimator my_xgb1's best error=1.0011,\tbest estimator my_xgb2's best error=0.1685\n",
+ "[flaml.automl: 09-29 23:08:28] {1763} INFO - iteration 72, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:28] {1952} INFO - at 28.2s,\testimator my_xgb1's best error=1.0011,\tbest estimator my_xgb2's best error=0.1685\n",
+ "[flaml.automl: 09-29 23:08:28] {1763} INFO - iteration 73, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:28] {1952} INFO - at 28.4s,\testimator my_xgb1's best error=1.0011,\tbest estimator my_xgb2's best error=0.1685\n",
+ "[flaml.automl: 09-29 23:08:28] {1763} INFO - iteration 74, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:28] {1952} INFO - at 28.5s,\testimator my_xgb1's best error=1.0011,\tbest estimator my_xgb2's best error=0.1685\n",
+ "[flaml.automl: 09-29 23:08:28] {1763} INFO - iteration 75, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:28] {1952} INFO - at 28.6s,\testimator my_xgb1's best error=1.0011,\tbest estimator my_xgb2's best error=0.1685\n",
+ "[flaml.automl: 09-29 23:08:28] {1763} INFO - iteration 76, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:28] {1952} INFO - at 28.7s,\testimator my_xgb1's best error=1.0011,\tbest estimator my_xgb2's best error=0.1685\n",
+ "[flaml.automl: 09-29 23:08:28] {1763} INFO - iteration 77, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:29] {1952} INFO - at 28.8s,\testimator my_xgb1's best error=1.0011,\tbest estimator my_xgb2's best error=0.1685\n",
+ "[flaml.automl: 09-29 23:08:29] {1763} INFO - iteration 78, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:29] {1952} INFO - at 28.9s,\testimator my_xgb1's best error=1.0011,\tbest estimator my_xgb2's best error=0.1685\n",
+ "[flaml.automl: 09-29 23:08:29] {1763} INFO - iteration 79, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:29] {1952} INFO - at 29.0s,\testimator my_xgb1's best error=1.0011,\tbest estimator my_xgb2's best error=0.1685\n",
+ "[flaml.automl: 09-29 23:08:29] {1763} INFO - iteration 80, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:29] {1952} INFO - at 29.1s,\testimator my_xgb1's best error=1.0011,\tbest estimator my_xgb2's best error=0.1685\n",
+ "[flaml.automl: 09-29 23:08:29] {1763} INFO - iteration 81, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:29] {1952} INFO - at 29.2s,\testimator my_xgb1's best error=1.0011,\tbest estimator my_xgb2's best error=0.1685\n",
+ "[flaml.automl: 09-29 23:08:29] {1763} INFO - iteration 82, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:29] {1952} INFO - at 29.3s,\testimator my_xgb1's best error=1.0011,\tbest estimator my_xgb2's best error=0.1685\n",
+ "[flaml.automl: 09-29 23:08:29] {1763} INFO - iteration 83, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:29] {1952} INFO - at 29.4s,\testimator my_xgb1's best error=1.0011,\tbest estimator my_xgb2's best error=0.1685\n",
+ "[flaml.automl: 09-29 23:08:29] {1763} INFO - iteration 84, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:29] {1952} INFO - at 29.6s,\testimator my_xgb1's best error=1.0011,\tbest estimator my_xgb2's best error=0.1685\n",
+ "[flaml.automl: 09-29 23:08:29] {1763} INFO - iteration 85, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:29] {1952} INFO - at 29.7s,\testimator my_xgb1's best error=1.0011,\tbest estimator my_xgb2's best error=0.1685\n",
+ "[flaml.automl: 09-29 23:08:29] {1763} INFO - iteration 86, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:30] {1952} INFO - at 29.8s,\testimator my_xgb1's best error=1.0011,\tbest estimator my_xgb2's best error=0.1685\n",
+ "[flaml.automl: 09-29 23:08:30] {1763} INFO - iteration 87, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:30] {1952} INFO - at 29.9s,\testimator my_xgb1's best error=1.0011,\tbest estimator my_xgb2's best error=0.1685\n",
+ "[flaml.automl: 09-29 23:08:30] {1763} INFO - iteration 88, current learner my_xgb1\n",
+ "[flaml.automl: 09-29 23:08:30] {1952} INFO - at 30.0s,\testimator my_xgb1's best error=1.0011,\tbest estimator my_xgb2's best error=0.1685\n",
+ "[flaml.automl: 09-29 23:08:30] {2059} INFO - selected model: \n",
+ "[flaml.automl: 09-29 23:08:35] {2122} INFO - retrain my_xgb2 for 4.9s\n",
+ "[flaml.automl: 09-29 23:08:35] {2128} INFO - retrained model: \n",
+ "[flaml.automl: 09-29 23:08:35] {1557} INFO - fit succeeded\n",
+ "[flaml.automl: 09-29 23:08:35] {1558} INFO - Time taken to find the best model: 28.05234169960022\n",
+ "[flaml.automl: 09-29 23:08:35] {1569} WARNING - Time taken to find the best model is 94% of the provided time budget and not all estimators' hyperparameter search converged. Consider increasing the time budget.\n"
]
}
],
@@ -851,7 +801,7 @@
"text": [
"Best hyperparmeter config: {'n_estimators': 810, 'max_leaves': 148, 'min_child_weight': 30.65305732414229, 'learning_rate': 0.05793074143079172, 'subsample': 0.9452642648281835, 'colsample_bylevel': 0.8662229421401874, 'colsample_bytree': 0.7851677398738949, 'reg_alpha': 0.00738292823760415, 'reg_lambda': 1.2202619267865558}\n",
"Best r2 on validation data: 0.8315\n",
- "Training duration of best run: 4.888 s\n",
+ "Training duration of best run: 5.028 s\n",
"Predicted labels [146309.06 253975.23 148795.17 ... 192561.88 182641.44 270495.53]\n",
"True labels 14740 136900.0\n",
"10101 241300.0\n",
diff --git a/setup.py b/setup.py
index f17e675e0..4ae501fbe 100644
--- a/setup.py
+++ b/setup.py
@@ -18,7 +18,7 @@ install_requires = [
"lightgbm>=2.3.1",
"xgboost>=0.90,<=1.3.3",
"scipy>=1.4.1",
- "catboost>=0.23",
+ # "catboost>=0.23", # making optional for conda
"scikit-learn>=0.24",
]
@@ -47,6 +47,7 @@ setuptools.setup(
"coverage>=5.3",
"pre-commit",
"xgboost<1.3",
+ "catboost>=0.23",
"rgf-python",
"optuna==2.8.0",
"vowpalwabbit",
@@ -58,6 +59,7 @@ setuptools.setup(
"azure-storage-blob",
"statsmodels>=0.12.2",
],
+ "catboost": ["catboost>=0.23"],
"blendsearch": ["optuna==2.8.0"],
"ray": [
"ray[tune]==1.6.0",
diff --git a/test/test_automl.py b/test/test_automl.py
index adbbb8f38..ae2814f64 100644
--- a/test/test_automl.py
+++ b/test/test_automl.py
@@ -2,7 +2,12 @@ import unittest
import numpy as np
import scipy.sparse
-from sklearn.datasets import load_boston, load_iris, load_wine, load_breast_cancer
+from sklearn.datasets import (
+ fetch_california_housing,
+ load_iris,
+ load_wine,
+ load_breast_cancer,
+)
import pandas as pd
from datetime import datetime
@@ -17,59 +22,37 @@ from flaml.training_log import training_log_reader
class MyRegularizedGreedyForest(SKLearnEstimator):
- def __init__(
- self,
- task="binary",
- n_jobs=1,
- max_leaf=4,
- n_iter=1,
- n_tree_search=1,
- opt_interval=1,
- learning_rate=1.0,
- min_samples_leaf=1,
- **params
- ):
+ def __init__(self, task="binary", **config):
- super().__init__(task, **params)
+ super().__init__(task, **config)
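+ # hyperparameters arrive via **config; per this PR the base estimator converts them into self.params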
- if "regression" in task:
- self.estimator_class = RGFRegressor
- else:
+ if task in ("binary", "multi"):
self.estimator_class = RGFClassifier
-
- # round integer hyperparameters
- self.params = {
- "n_jobs": n_jobs,
- "max_leaf": int(round(max_leaf)),
- "n_iter": int(round(n_iter)),
- "n_tree_search": int(round(n_tree_search)),
- "opt_interval": int(round(opt_interval)),
- "learning_rate": learning_rate,
- "min_samples_leaf": int(round(min_samples_leaf)),
- }
+ else:
+ self.estimator_class = RGFRegressor
@classmethod
def search_space(cls, data_size, task):
space = {
"max_leaf": {
- "domain": tune.qloguniform(lower=4, upper=data_size, q=1),
+ "domain": tune.lograndint(lower=4, upper=data_size),
"init_value": 4,
},
"n_iter": {
- "domain": tune.qloguniform(lower=1, upper=data_size, q=1),
+ "domain": tune.lograndint(lower=1, upper=data_size),
"init_value": 1,
},
"n_tree_search": {
- "domain": tune.qloguniform(lower=1, upper=32768, q=1),
+ "domain": tune.lograndint(lower=1, upper=32768),
"init_value": 1,
},
"opt_interval": {
- "domain": tune.qloguniform(lower=1, upper=10000, q=1),
+ "domain": tune.lograndint(lower=1, upper=10000),
"init_value": 100,
},
"learning_rate": {"domain": tune.loguniform(lower=0.01, upper=20.0)},
"min_samples_leaf": {
- "domain": tune.qloguniform(lower=1, upper=20, q=1),
+ "domain": tune.lograndint(lower=1, upper=20),
"init_value": 20,
},
}
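The domain changes above swap `tune.qloguniform(..., q=1)` for `tune.lograndint(...)`, which samples log-distributed integers natively and makes the deleted `int(round(...))` coercions unnecessary. A small sketch of the distinction, using the same flaml.tune API the test exercises:

```python
from flaml import tune

# qloguniform(4, 1000, q=1) yields floats snapped to a 1-spaced grid (e.g. 7.0);
# lograndint(4, 1000) yields integers drawn log-uniformly (e.g. 7),
# so estimators no longer need int(round(...)) on these hyperparameters.
max_leaf_quantized = tune.qloguniform(lower=4, upper=1000, q=1)
max_leaf_integer = tune.lograndint(lower=4, upper=1000)
```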
@@ -97,15 +80,15 @@ def logregobj(preds, dtrain):
class MyXGB1(XGBoostEstimator):
"""XGBoostEstimator with logregobj as the objective function"""
- def __init__(self, **params):
- super().__init__(objective=logregobj, **params)
+ def __init__(self, **config):
+ super().__init__(objective=logregobj, **config)
class MyXGB2(XGBoostEstimator):
"""XGBoostEstimator with 'reg:squarederror' as the objective function"""
- def __init__(self, **params):
- super().__init__(objective="reg:squarederror", **params)
+ def __init__(self, **config):
+ super().__init__(objective="reg:squarederror", **config)
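For context on the hunk header's `def logregobj(preds, dtrain)`: MyXGB1 plugs a custom objective into xgboost, which must return a (gradient, hessian) pair. A self-contained sketch following xgboost's documented custom-objective contract (the test file's exact body is not shown in this hunk):

```python
import numpy as np

def logregobj(preds, dtrain):
    """Custom logistic objective for xgboost: return per-row gradient and hessian."""
    labels = dtrain.get_label()
    preds = 1.0 / (1.0 + np.exp(-preds))  # map raw margins through the sigmoid
    grad = preds - labels
    hess = preds * (1.0 - preds)
    return grad, hess
```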
class MyLargeLGBM(LGBMEstimator):
@@ -266,7 +249,7 @@ class TestAutoML(unittest.TestCase):
"n_splits": 3,
"metric": "accuracy",
"log_training_metric": True,
- "verbose": 1,
+ "verbose": 4,
"ensemble": True,
}
automl.fit(X, y, **automl_settings)
@@ -281,7 +264,7 @@ class TestAutoML(unittest.TestCase):
"n_splits": 3,
"metric": "accuracy",
"log_training_metric": True,
- "verbose": 1,
+ "verbose": 4,
"ensemble": True,
}
automl.fit(X, y, **automl_settings)
@@ -296,7 +279,7 @@ class TestAutoML(unittest.TestCase):
"n_splits": 3,
"metric": "accuracy",
"log_training_metric": True,
- "verbose": 1,
+ "verbose": 4,
"ensemble": True,
}
automl.fit(X, y, **automl_settings)
@@ -311,7 +294,7 @@ class TestAutoML(unittest.TestCase):
"n_splits": 3,
"metric": "accuracy",
"log_training_metric": True,
- "verbose": 1,
+ "verbose": 4,
"ensemble": True,
}
automl.fit(X, y, **automl_settings)
@@ -525,7 +508,7 @@ class TestAutoML(unittest.TestCase):
"n_jobs": 1,
"model_history": True,
}
- X_train, y_train = load_boston(return_X_y=True)
+ X_train, y_train = fetch_california_housing(return_X_y=True)
n = int(len(y_train) * 9 // 10)
automl_experiment.fit(
X_train=X_train[:n],
@@ -648,7 +631,7 @@ class TestAutoML(unittest.TestCase):
"n_concurrent_trials": 2,
"hpo_method": hpo_method,
}
- X_train, y_train = load_boston(return_X_y=True)
+ X_train, y_train = fetch_california_housing(return_X_y=True)
try:
automl_experiment.fit(X_train=X_train, y_train=y_train, **automl_settings)
print(automl_experiment.predict(X_train))
@@ -861,8 +844,8 @@ class TestAutoML(unittest.TestCase):
automl_experiment = AutoML()
automl_settings = {
"time_budget": 3,
- "metric": 'accuracy',
- "task": 'classification',
+ "metric": "accuracy",
+ "task": "classification",
"log_file_name": "test/iris.log",
"log_training_metric": True,
"n_jobs": 1,
@@ -873,16 +856,19 @@ class TestAutoML(unittest.TestCase):
# test drop column
X_train.columns = range(X_train.shape[1])
X_train[X_train.shape[1]] = np.zeros(len(y_train))
- automl_experiment.fit(X_train=X_train, y_train=y_train,
- **automl_settings)
+ automl_experiment.fit(X_train=X_train, y_train=y_train, **automl_settings)
automl_val_accuracy = 1.0 - automl_experiment.best_loss
- print('Best ML leaner:', automl_experiment.best_estimator)
- print('Best hyperparmeter config:', automl_experiment.best_config)
- print('Best accuracy on validation data: {0:.4g}'.format(automl_val_accuracy))
- print('Training duration of best run: {0:.4g} s'.format(automl_experiment.best_config_train_time))
+ print("Best ML leaner:", automl_experiment.best_estimator)
+ print("Best hyperparmeter config:", automl_experiment.best_config)
+ print("Best accuracy on validation data: {0:.4g}".format(automl_val_accuracy))
+ print(
+ "Training duration of best run: {0:.4g} s".format(
+ automl_experiment.best_config_train_time
+ )
+ )
starting_points = {}
- log_file_name = automl_settings['log_file_name']
+ log_file_name = automl_settings["log_file_name"]
with training_log_reader(log_file_name) as reader:
for record in reader.records():
config = record.config
@@ -893,25 +879,28 @@ class TestAutoML(unittest.TestCase):
max_iter = sum([len(s) for k, s in starting_points.items()])
automl_settings_resume = {
"time_budget": 2,
- "metric": 'accuracy',
- "task": 'classification',
+ "metric": "accuracy",
+ "task": "classification",
"log_file_name": "test/iris_resume_all.log",
"log_training_metric": True,
"n_jobs": 1,
"max_iter": max_iter,
"model_history": True,
- "log_type": 'all',
+ "log_type": "all",
"starting_points": starting_points,
"append_log": True,
}
new_automl_experiment = AutoML()
- new_automl_experiment.fit(X_train=X_train, y_train=y_train,
- **automl_settings_resume)
+ new_automl_experiment.fit(
+ X_train=X_train, y_train=y_train, **automl_settings_resume
+ )
new_automl_val_accuracy = 1.0 - new_automl_experiment.best_loss
# print('Best ML leaner:', new_automl_experiment.best_estimator)
# print('Best hyperparmeter config:', new_automl_experiment.best_config)
- print('Best accuracy on validation data: {0:.4g}'.format(new_automl_val_accuracy))
+ print(
+ "Best accuracy on validation data: {0:.4g}".format(new_automl_val_accuracy)
+ )
# print('Training duration of best run: {0:.4g} s'.format(new_automl_experiment.best_config_train_time))
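The rewritten custom-estimator test above illustrates two API shifts: hyperparameters now flow through `**config` into the base class (which keeps them in `self.params`), and integer log-scale dimensions are declared with `tune.lograndint` instead of `tune.qloguniform(..., q=1)` plus manual `int(round(...))`. A condensed sketch of the same pattern with a stand-in sklearn learner (the `MyForest` class and its single-dimension space are illustrative):

```python
from flaml import tune
from flaml.model import SKLearnEstimator
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor


class MyForest(SKLearnEstimator):
    def __init__(self, task="binary", **config):
        # The base class stores the hyperparameters from **config in
        # self.params; no per-parameter plumbing or rounding is needed.
        super().__init__(task, **config)
        if task in ("binary", "multi"):
            self.estimator_class = RandomForestClassifier
        else:
            self.estimator_class = RandomForestRegressor

    @classmethod
    def search_space(cls, data_size, task):
        return {
            # lograndint samples integers on a log scale directly.
            "n_estimators": {
                "domain": tune.lograndint(lower=4, upper=data_size),
                "init_value": 4,
            },
        }
```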
diff --git a/test/test_python_log.py b/test/test_python_log.py
index b68ad18e8..370e7c393 100644
--- a/test/test_python_log.py
+++ b/test/test_python_log.py
@@ -1,6 +1,6 @@
from flaml.tune.space import unflatten_hierarchical
from flaml import AutoML
-from sklearn.datasets import load_boston
+from sklearn.datasets import fetch_california_housing
import os
import unittest
import logging
@@ -9,7 +9,6 @@ import io
class TestLogging(unittest.TestCase):
-
def test_logging_level(self):
from flaml import logger, logger_formatter
@@ -30,8 +29,8 @@ class TestLogging(unittest.TestCase):
automl = AutoML()
automl_settings = {
"time_budget": 1,
- "metric": 'rmse',
- "task": 'regression',
+ "metric": "rmse",
+ "task": "regression",
"log_file_name": training_log,
"log_training_metric": True,
"n_jobs": 1,
@@ -39,35 +38,42 @@ class TestLogging(unittest.TestCase):
"keep_search_state": True,
"learner_selector": "roundrobin",
}
- X_train, y_train = load_boston(return_X_y=True)
+ X_train, y_train = fetch_california_housing(return_X_y=True)
n = len(y_train) >> 1
print(automl.model, automl.classes_, automl.predict(X_train))
- automl.fit(X_train=X_train[:n], y_train=y_train[:n],
- X_val=X_train[n:], y_val=y_train[n:],
- **automl_settings)
+ automl.fit(
+ X_train=X_train[:n],
+ y_train=y_train[:n],
+ X_val=X_train[n:],
+ y_val=y_train[n:],
+ **automl_settings
+ )
logger.info(automl.search_space)
logger.info(automl.low_cost_partial_config)
logger.info(automl.points_to_evaluate)
logger.info(automl.cat_hp_cost)
import optuna as ot
+
study = ot.create_study()
from flaml.tune.space import define_by_run_func, add_cost_to_space
+
sample = define_by_run_func(study.ask(), automl.search_space)
logger.info(sample)
logger.info(unflatten_hierarchical(sample, automl.search_space))
add_cost_to_space(
- automl.search_space, automl.low_cost_partial_config,
- automl.cat_hp_cost
+ automl.search_space, automl.low_cost_partial_config, automl.cat_hp_cost
)
logger.info(automl.search_space["ml"].categories)
config = automl.best_config.copy()
- config['learner'] = automl.best_estimator
+ config["learner"] = automl.best_estimator
automl.trainable({"ml": config})
from flaml import tune, BlendSearch
from flaml.automl import size
from functools import partial
+
search_alg = BlendSearch(
- metric='val_loss', mode='min',
+ metric="val_loss",
+ mode="min",
space=automl.search_space,
low_cost_partial_config=automl.low_cost_partial_config,
points_to_evaluate=automl.points_to_evaluate,
@@ -75,19 +81,25 @@ class TestLogging(unittest.TestCase):
prune_attr=automl.prune_attr,
min_resource=automl.min_resource,
max_resource=automl.max_resource,
- config_constraints=[(partial(size, automl._state), '<=', automl._mem_thres)],
- metric_constraints=automl.metric_constraints)
+ config_constraints=[
+ (partial(size, automl._state), "<=", automl._mem_thres)
+ ],
+ metric_constraints=automl.metric_constraints,
+ )
analysis = tune.run(
- automl.trainable, search_alg=search_alg, # verbose=2,
- time_budget_s=1, num_samples=-1)
- print(min(trial.last_result["val_loss"]
- for trial in analysis.trials))
- config = analysis.trials[-1].last_result['config']['ml']
- automl._state._train_with_config(config['learner'], config)
+ automl.trainable,
+ search_alg=search_alg, # verbose=2,
+ time_budget_s=1,
+ num_samples=-1,
+ )
+ print(min(trial.last_result["val_loss"] for trial in analysis.trials))
+ config = analysis.trials[-1].last_result["config"]["ml"]
+ automl._state._train_with_config(config["learner"], config)
# Check if the log buffer is populated.
self.assertTrue(len(buf.getvalue()) > 0)
import pickle
- with open('automl.pkl', 'wb') as f:
+
+ with open("automl.pkl", "wb") as f:
pickle.dump(automl, f, pickle.HIGHEST_PROTOCOL)
print(automl.__version__)
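The logging test now drives the programmatic tune API end to end from a fitted AutoML instance; the same pieces can be wired up standalone. A minimal, self-contained sketch (the toy objective and search space are invented for illustration):

```python
import time

from flaml import tune, BlendSearch


def evaluate(config):
    # Toy objective: smaller n is cheaper to evaluate, and the loss is
    # minimized at x == 1 regardless of n.
    time.sleep(config["n"] / 1e5)
    return {"val_loss": config["n"] * (1 - config["x"]) ** 2}


search_space = {
    "n": tune.lograndint(lower=1, upper=1000),
    "x": tune.uniform(lower=0, upper=1),
}
search_alg = BlendSearch(
    metric="val_loss",
    mode="min",
    space=search_space,
    # A cheap starting point for the search; per this patch, the message
    # about it is reported at INFO level rather than as a warning.
    low_cost_partial_config={"n": 1},
)
analysis = tune.run(
    evaluate, search_alg=search_alg, time_budget_s=1, num_samples=-1
)
print(min(t.last_result["val_loss"] for t in analysis.trials))
```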
diff --git a/test/test_training_log.py b/test/test_training_log.py
index 40ac91ef0..73de45d25 100644
--- a/test/test_training_log.py
+++ b/test/test_training_log.py
@@ -2,15 +2,14 @@ import os
import unittest
from tempfile import TemporaryDirectory
-from sklearn.datasets import load_boston
+from sklearn.datasets import fetch_california_housing
from flaml import AutoML
from flaml.training_log import training_log_reader
class TestTrainingLog(unittest.TestCase):
-
- def test_training_log(self, path='test_training_log.log'):
+ def test_training_log(self, path="test_training_log.log"):
with TemporaryDirectory() as d:
filename = os.path.join(d, path)
@@ -19,8 +18,8 @@ class TestTrainingLog(unittest.TestCase):
automl = AutoML()
automl_settings = {
"time_budget": 1,
- "metric": 'mse',
- "task": 'regression',
+ "metric": "mse",
+ "task": "regression",
"log_file_name": filename,
"log_training_metric": True,
"mem_thres": 1024 * 1024,
@@ -31,10 +30,9 @@ class TestTrainingLog(unittest.TestCase):
"ensemble": True,
"keep_search_state": True,
}
- X_train, y_train = load_boston(return_X_y=True)
+ X_train, y_train = fetch_california_housing(return_X_y=True)
automl.fit(X_train=X_train, y_train=y_train, **automl_settings)
- automl._state._train_with_config(
- automl.best_estimator, automl.best_config)
+ automl._state._train_with_config(automl.best_estimator, automl.best_config)
# Check if the training log file is populated.
self.assertTrue(os.path.exists(filename))
@@ -49,11 +47,11 @@ class TestTrainingLog(unittest.TestCase):
automl.fit(X_train=X_train, y_train=y_train, **automl_settings)
automl._selected.update(None, 0)
automl = AutoML()
- automl.fit(X_train=X_train, y_train=y_train, max_iter=0)
+ automl.fit(X_train=X_train, y_train=y_train, max_iter=0, task="regression")
def test_illfilename(self):
try:
- self.test_training_log('/')
+ self.test_training_log("/")
except IsADirectoryError:
print("IsADirectoryError happens as expected in linux.")
except PermissionError:
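`training_log_reader` replays the log written during `fit`, which is how the resume test in test_automl.py reconstructs per-learner starting points. A minimal sketch of iterating a log; the path is a placeholder, and the record's `learner` attribute is an assumption drawn from the log schema (only `record.config` appears verbatim in the tests above):

```python
from flaml.training_log import training_log_reader

starting_points = {}
with training_log_reader("test/example.log") as reader:  # placeholder path
    for record in reader.records():
        # record.config holds the tried hyperparameters; keying by the
        # learner name (assumed attribute) groups them per estimator.
        starting_points.setdefault(record.learner, []).append(record.config)
print({learner: len(configs) for learner, configs in starting_points.items()})
```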
diff --git a/test/tune/test_searcher.py b/test/tune/test_searcher.py
index 0a1421aef..ce07670ac 100644
--- a/test/tune/test_searcher.py
+++ b/test/tune/test_searcher.py
@@ -72,8 +72,9 @@ except (ImportError, AssertionError):
searcher = BlendSearch(
metric="m", global_search_alg=searcher, metric_constraints=[("c", "<", 1)]
)
- searcher.set_search_properties(metric="m2", config=config)
- searcher.set_search_properties(config={"time_budget_s": 0})
+ searcher.set_search_properties(
+ metric="m2", config=config, setting={"time_budget_s": 0}
+ )
c = searcher.suggest("t1")
searcher.on_trial_complete("t1", {"config": c}, True)
c = searcher.suggest("t2")
@@ -146,3 +147,11 @@ except (ImportError, AssertionError):
print(searcher.suggest("t4"))
searcher.on_trial_complete({"t1"}, {})
searcher.on_trial_result({"t2"}, {})
+ np.random.seed(654321)
+ searcher = RandomSearch(
+ space=config,
+ points_to_evaluate=[{"a": 7, "b": 1e-3}, {"a": 6, "b": 3e-4}],
+ )
+ print(searcher.suggest("t1"))
+ print(searcher.suggest("t2"))
+ print(searcher.suggest("t3"))
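The searcher test now folds the runtime setting into a single `set_search_properties(metric=..., config=..., setting={"time_budget_s": 0})` call and seeds NumPy before constructing `RandomSearch`, making the suggestions that follow the listed `points_to_evaluate` reproducible. A small sketch of that reproducibility check; the toy space stands in for the test's `config`, and the import path for `RandomSearch` is an assumption:

```python
import numpy as np

from flaml import tune
from flaml.searcher.blendsearch import RandomSearch  # assumed import path


def first_suggestions(seed, k=3):
    # Seeding numpy's global RNG fixes the random suggestions that come
    # after the explicitly listed points_to_evaluate.
    np.random.seed(seed)
    searcher = RandomSearch(
        space={"a": tune.randint(1, 8), "b": tune.loguniform(1e-4, 1e-2)},
        points_to_evaluate=[{"a": 7, "b": 1e-3}, {"a": 6, "b": 3e-4}],
    )
    return [searcher.suggest(f"t{i}") for i in range(k)]


# The same seed should yield the same sequence of suggestions.
print(first_suggestions(654321) == first_suggestions(654321))
```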