diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 000000000..02c10b28f
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,18 @@
+
+
+
+
+## Why are these changes needed?
+
+
+
+## Related issue number
+
+
+
+## Checks
+
+- [ ] I've used [pre-commit](https://microsoft.github.io/FLAML/docs/Contribute#pre-commit) to lint the changes in this PR, or I've made sure [lint with flake8](https://github.com/microsoft/FLAML/blob/816a82a1155b4de4705b21a615ccdff67c6da379/.github/workflows/python-package.yml#L54-L59) output is two 0s.
+- [ ] I've included any doc changes needed for https://microsoft.github.io/FLAML/. See https://microsoft.github.io/FLAML/docs/Contribute#documentation to build and test documentation locally.
+- [ ] I've added tests (if relevant) corresponding to the changes introduced in this PR.
+- [ ] I've made sure all auto checks have passed.
diff --git a/README.md b/README.md
index d368f709f..421ef87fc 100644
--- a/README.md
+++ b/README.md
@@ -12,9 +12,12 @@
+:fire: **Update (2022/08): We will give a [hands-on tutorial on FLAML at KDD 2022](https://github.com/microsoft/FLAML/tree/tutorial/tutorial) on 08/16/2022.**
+
+## What is FLAML
FLAML is a lightweight Python library that finds accurate machine
learning models automatically, efficiently and economically. It frees users from selecting
-learners and hyperparameters for each learner.
+learners and hyperparameters for each learner. It can also be used to tune generic hyperparameters for MLOps workflows, pipelines, mathematical/statistical models, algorithms, computing experiments, software configurations and so on.
1. For common machine learning tasks like classification and regression, it quickly finds quality models for user-provided data with low computational resources. It supports both classical machine learning models and deep neural networks.
1. It is easy to customize or extend. Users can find their desired customizability from a smooth range: minimal customization (computational resource budget), medium customization (e.g., scikit-style learner, search space and metric), or full customization (arbitrary training and evaluation code).
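A minimal, hedged sketch of the basic usage the README describes; the dataset, budget, and printed attributes are illustrative rather than taken from this PR:

```python
from flaml import AutoML
from sklearn.datasets import load_iris

# Minimal customization: only the task type and a time budget in seconds.
X, y = load_iris(return_X_y=True)
automl = AutoML()
automl.fit(X, y, task="classification", time_budget=60)
print(automl.best_estimator, automl.best_config)
```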
@@ -24,6 +27,7 @@ and learner selection method invented by Microsoft Research.
FLAML has a .NET implementation in [ML.NET](http://dot.net/ml), an open-source, cross-platform machine learning framework for .NET. In ML.NET, you can use FLAML via low-code solutions like [Model Builder](https://dotnet.microsoft.com/apps/machinelearning-ai/ml-dotnet/model-builder) Visual Studio extension and the cross-platform [ML.NET CLI](https://docs.microsoft.com/dotnet/machine-learning/automate-training-with-cli). Alternatively, you can use the [ML.NET AutoML API](https://www.nuget.org/packages/Microsoft.ML.AutoML/#versions-body-tab) for a code-first experience.
+
## Installation
### Python
diff --git a/flaml/automl.py b/flaml/automl.py
index ddaf478e7..7da169abc 100644
--- a/flaml/automl.py
+++ b/flaml/automl.py
@@ -44,6 +44,8 @@ from .data import (
TOKENCLASSIFICATION,
TS_FORECAST,
TS_FORECASTREGRESSION,
+ TS_FORECASTPANEL,
+ TS_TIMESTAMP_COL,
REGRESSION,
_is_nlp_task,
NLG_TASKS,
@@ -583,7 +585,7 @@ class AutoML(BaseEstimator):
["auto", 'stratified', 'uniform', 'time', 'group']. "auto" -> stratified.
For regression tasks, valid choices are ["auto", 'uniform', 'time'].
"auto" -> uniform.
- For ts_forecast tasks, must be "auto" or 'time'.
+ For time series forecast tasks, must be "auto" or 'time'.
For ranking task, must be "auto" or 'group'.
hpo_method: str, default="auto" | The hyperparameter
optimization method. By default, CFO is used for sequential
@@ -679,6 +681,7 @@ class AutoML(BaseEstimator):
}
}
```
+        skip_transform: boolean, default=False | Whether to skip the data pre-processing step prior to modeling.
fit_kwargs_by_estimator: dict, default=None | The user specified keywords arguments, grouped by estimator name.
e.g.,
@@ -734,6 +737,7 @@ class AutoML(BaseEstimator):
"fit_kwargs_by_estimator", {}
)
settings["custom_hp"] = settings.get("custom_hp", {})
+ settings["skip_transform"] = settings.get("skip_transform", False)
self._estimator_type = (
"classifier" if settings["task"] in CLASSIFICATION else "regressor"
@@ -897,7 +901,7 @@ class AutoML(BaseEstimator):
Args:
X: A numpy array of featurized instances, shape n * m,
- or for ts_forecast tasks:
+            or for time series forecast tasks:
a pandas dataframe with the first column containing
timestamp values (datetime type) or an integer n for
the predict steps (only valid when the estimator is
@@ -1121,7 +1125,7 @@ class AutoML(BaseEstimator):
"or all columns of X are integer ids (tokenized)"
)
- if issparse(X_train_all):
+ if issparse(X_train_all) or self._skip_transform:
self._transformer = self._label_transformer = False
self._X_train_all, self._y_train_all = X, y
else:
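As this hunk shows, the new `skip_transform` flag takes the same path as sparse input: the `DataTransformer` and label transformer are bypassed and `X`/`y` are used as given. A small usage sketch with synthetic data (values illustrative):

```python
import numpy as np
from flaml import AutoML

rng = np.random.default_rng(0)
X_train = rng.normal(size=(200, 5))                  # already numeric, nothing left to encode
y_train = 2.0 * X_train[:, 0] + rng.normal(size=200)

automl = AutoML()
automl.fit(
    X_train,
    y_train,
    task="regression",
    time_budget=10,
    skip_transform=True,  # new in this PR: leave the data untouched before modeling
)
```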
@@ -1275,18 +1279,38 @@ class AutoML(BaseEstimator):
# if eval_method = holdout, make holdout data
if self._split_type == "time":
if self._state.task in TS_FORECAST:
- num_samples = X_train_all.shape[0]
period = self._state.fit_kwargs[
"period"
] # NOTE: _prepare_data is before kwargs is updated to fit_kwargs_by_estimator
- assert (
- period < num_samples
- ), f"period={period}>#examples={num_samples}"
- split_idx = num_samples - period
- X_train = X_train_all[:split_idx]
- y_train = y_train_all[:split_idx]
- X_val = X_train_all[split_idx:]
- y_val = y_train_all[split_idx:]
+ if self._state.task == TS_FORECASTPANEL:
+ X_train_all["time_idx"] -= X_train_all["time_idx"].min()
+ X_train_all["time_idx"] = X_train_all["time_idx"].astype("int")
+ ids = self._state.fit_kwargs["group_ids"].copy()
+ ids.append(TS_TIMESTAMP_COL)
+ ids.append("time_idx")
+ y_train_all = pd.DataFrame(y_train_all)
+ y_train_all[ids] = X_train_all[ids]
+ X_train_all = X_train_all.sort_values(ids)
+ y_train_all = y_train_all.sort_values(ids)
+ training_cutoff = X_train_all["time_idx"].max() - period
+ X_train = X_train_all[lambda x: x.time_idx <= training_cutoff]
+ y_train = y_train_all[
+ lambda x: x.time_idx <= training_cutoff
+ ].drop(columns=ids)
+ X_val = X_train_all[lambda x: x.time_idx > training_cutoff]
+ y_val = y_train_all[
+ lambda x: x.time_idx > training_cutoff
+ ].drop(columns=ids)
+ else:
+ num_samples = X_train_all.shape[0]
+ assert (
+ period < num_samples
+ ), f"period={period}>#examples={num_samples}"
+ split_idx = num_samples - period
+ X_train = X_train_all[:split_idx]
+ y_train = y_train_all[:split_idx]
+ X_val = X_train_all[split_idx:]
+ y_val = y_train_all[split_idx:]
else:
if (
"sample_weight" in self._state.fit_kwargs
@@ -1456,7 +1480,10 @@ class AutoML(BaseEstimator):
)
elif self._split_type == "time":
# logger.info("Using TimeSeriesSplit")
- if self._state.task in TS_FORECAST:
+ if (
+ self._state.task in TS_FORECAST
+ and self._state.task is not TS_FORECASTPANEL
+ ):
period = self._state.fit_kwargs[
"period"
] # NOTE: _prepare_data is before kwargs is updated to fit_kwargs_by_estimator
@@ -1468,6 +1495,14 @@ class AutoML(BaseEstimator):
)
logger.info(f"Using nsplits={n_splits} due to data size limit.")
self._state.kf = TimeSeriesSplit(n_splits=n_splits, test_size=period)
+ elif self._state.task is TS_FORECASTPANEL:
+ n_groups = X_train.groupby(
+ self._state.fit_kwargs.get("group_ids")
+ ).ngroups
+ period = self._state.fit_kwargs.get("period")
+ self._state.kf = TimeSeriesSplit(
+ n_splits=n_splits, test_size=period * n_groups
+ )
else:
self._state.kf = TimeSeriesSplit(n_splits=n_splits)
elif isinstance(self._split_type, str):
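For cross-validation on panel data, each test fold is sized as `period * n_groups` so that it still covers one forecast horizon per series. A small sketch with illustrative numbers:

```python
import numpy as np
from sklearn.model_selection import TimeSeriesSplit

n_groups = 3   # e.g. three series in the panel
period = 2     # forecast horizon
kf = TimeSeriesSplit(n_splits=4, test_size=period * n_groups)

X = np.arange(30).reshape(-1, 1)  # 30 panel rows in time order (toy)
for train_idx, test_idx in kf.split(X):
    print(len(train_idx), len(test_idx))  # every test fold has period * n_groups = 6 rows
```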
@@ -1542,6 +1577,7 @@ class AutoML(BaseEstimator):
record_id=-1,
auto_augment=None,
custom_hp=None,
+ skip_transform=None,
fit_kwargs_by_estimator=None,
**fit_kwargs,
):
@@ -1554,13 +1590,13 @@ class AutoML(BaseEstimator):
Args:
log_file_name: A string of the log file name.
X_train: A numpy array or dataframe of training data in shape n*m.
- For ts_forecast tasks, the first column of X_train
+ For time series forecast tasks, the first column of X_train
must be the timestamp column (datetime type). Other
columns in the dataframe are assumed to be exogenous
variables (categorical or numeric).
y_train: A numpy array or series of labels in shape n*1.
dataframe: A dataframe of training data including label column.
- For ts_forecast tasks, dataframe must be specified and should
+ For time series forecast tasks, dataframe must be specified and should
have at least two columns: timestamp and label, where the first
column is the timestamp column (datetime type). Other columns
in the dataframe are assumed to be exogenous variables
@@ -1587,7 +1623,7 @@ class AutoML(BaseEstimator):
["auto", 'stratified', 'uniform', 'time', 'group']. "auto" -> stratified.
For regression tasks, valid choices are ["auto", 'uniform', 'time'].
"auto" -> uniform.
- For ts_forecast tasks, must be "auto" or 'time'.
+ For time series forecast tasks, must be "auto" or 'time'.
For ranking task, must be "auto" or 'group'.
groups: None or array-like | Group labels (with matching length to
y_train) or groups counts (with sum equal to length of y_train)
@@ -1633,10 +1669,29 @@ class AutoML(BaseEstimator):
```
**fit_kwargs: Other key word arguments to pass to fit() function of
- the searched learners, such as sample_weight. Include:
- period: int | forecast horizon for ts_forecast tasks.
+ the searched learners, such as sample_weight. Below are a few examples of
+ estimator-specific parameters:
+ period: int | forecast horizon for all time series forecast tasks.
gpu_per_trial: float, default = 0 | A float of the number of gpus per trial,
- only used by TransformersEstimator and XGBoostSklearnEstimator.
+ only used by TransformersEstimator, XGBoostSklearnEstimator, and
+ TemporalFusionTransformerEstimator.
+ group_ids: list of strings of column names identifying a time series, only
+ used by TemporalFusionTransformerEstimator, required for
+ 'ts_forecast_panel' task. `group_ids` is a parameter for TimeSeriesDataSet object
+ from PyTorchForecasting.
+ For other parameters to describe your dataset, refer to
+ [TimeSeriesDataSet PyTorchForecasting](https://pytorch-forecasting.readthedocs.io/en/stable/api/pytorch_forecasting.data.timeseries.TimeSeriesDataSet.html).
+ To specify your variables, use `static_categoricals`, `static_reals`,
+ `time_varying_known_categoricals`, `time_varying_known_reals`,
+ `time_varying_unknown_categoricals`, `time_varying_unknown_reals`,
+ `variable_groups`. To provide more information on your data, use
+ `max_encoder_length`, `min_encoder_length`, `lags`.
+ log_dir: str, default = "lightning_logs" | Folder into which to log results
+ for tensorboard, only used by TemporalFusionTransformerEstimator.
+ max_epochs: int, default = 20 | Maximum number of epochs to run training,
+ only used by TemporalFusionTransformerEstimator.
+ batch_size: int, default = 64 | Batch size for training model, only
+ used by TemporalFusionTransformerEstimator.
"""
task = task or self._settings.get("task")
eval_method = eval_method or self._settings.get("eval_method")
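Putting the documented arguments together, a hedged end-to-end sketch of a `ts_forecast_panel` call could look like the following; the toy panel, column names, and budgets are invented for illustration and are not a tested recipe from this PR:

```python
import numpy as np
import pandas as pd
from flaml import AutoML

# Toy panel: two stores with 36 monthly observations each (illustrative data).
dates = pd.date_range("2018-01-01", periods=36, freq="MS")
panel = pd.concat(
    [
        pd.DataFrame({"ds": dates, "store": store, "y": np.arange(36.0) + offset})
        for store, offset in [("A", 10.0), ("B", 0.0)]
    ],
    ignore_index=True,
)
X_train, y_train = panel[["ds", "store"]], panel["y"]

automl = AutoML()
automl.fit(
    X_train,
    y_train,
    task="ts_forecast_panel",
    time_budget=120,
    period=6,               # forecast horizon, required for all forecasting tasks
    group_ids=["store"],    # identifies each series, required for ts_forecast_panel
    max_encoder_length=12,  # TimeSeriesDataSet-style kwargs are passed through to the estimator
    max_epochs=5,
    batch_size=32,
)
```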
@@ -1651,6 +1706,7 @@ class AutoML(BaseEstimator):
self._state.fit_kwargs = fit_kwargs
self._state.custom_hp = custom_hp or self._settings.get("custom_hp")
+ self._skip_transform = self._settings.get("skip_transform") if skip_transform is None else skip_transform
self._state.fit_kwargs_by_estimator = (
fit_kwargs_by_estimator or self._settings.get("fit_kwargs_by_estimator")
)
@@ -1769,11 +1825,15 @@ class AutoML(BaseEstimator):
elif self._state.task in TS_FORECAST:
assert split_type in ["auto", "time"]
self._split_type = "time"
-
assert isinstance(
self._state.fit_kwargs.get("period"),
int, # NOTE: _decide_split_type is before kwargs is updated to fit_kwargs_by_estimator
), f"missing a required integer 'period' for '{TS_FORECAST}' task."
+ if self._state.fit_kwargs.get("group_ids"):
+                self._state.task = TS_FORECASTPANEL  # use the panel task when group_ids is provided
+ assert isinstance(
+ self._state.fit_kwargs.get("group_ids"), list
+ ), f"missing a required List[str] 'group_ids' for '{TS_FORECASTPANEL}' task."
elif self._state.task == "rank":
assert (
self._state.groups is not None
@@ -2072,7 +2132,11 @@ class AutoML(BaseEstimator):
use_ray=None,
metric_constraints=None,
custom_hp=None,
        cv_score_agg_func=None,
+        skip_transform=None,
fit_kwargs_by_estimator=None,
**fit_kwargs,
):
@@ -2080,13 +2144,13 @@ class AutoML(BaseEstimator):
Args:
X_train: A numpy array or a pandas dataframe of training data in
- shape (n, m). For ts_forecast tasks, the first column of X_train
shape (n, m). For time series forecast tasks, the first column of X_train
must be the timestamp column (datetime type). Other columns in
the dataframe are assumed to be exogenous variables (categorical or numeric).
When using ray, X_train can be a ray.ObjectRef.
y_train: A numpy array or a pandas series of labels in shape (n, ).
dataframe: A dataframe of training data including label column.
- For ts_forecast tasks, dataframe must be specified and must have
+ For time series forecast tasks, dataframe must be specified and must have
at least two columns, timestamp and label, where the first
column is the timestamp column (datetime type). Other columns in
the dataframe are assumed to be exogenous variables (categorical or numeric).
@@ -2137,7 +2201,7 @@ class AutoML(BaseEstimator):
```
task: A string of the task type, e.g.,
'classification', 'regression', 'ts_forecast_regression',
- 'ts_forecast_classification', 'rank', 'seq-classification',
+ 'ts_forecast_classification', 'ts_forecast_panel', 'rank', 'seq-classification',
'seq-regression', 'summarization'.
n_jobs: An integer of the number of threads for training | default=-1.
Use all available resources when n_jobs == -1.
@@ -2202,7 +2266,7 @@ class AutoML(BaseEstimator):
["auto", 'stratified', 'uniform', 'time', 'group']. "auto" -> stratified.
For regression tasks, valid choices are ["auto", 'uniform', 'time'].
"auto" -> uniform.
- For ts_forecast tasks, must be "auto" or 'time'.
+ For time series forecast tasks, must be "auto" or 'time'.
For ranking task, must be "auto" or 'group'.
hpo_method: str, default="auto" | The hyperparameter
optimization method. By default, CFO is used for sequential
@@ -2277,6 +2341,8 @@ class AutoML(BaseEstimator):
Each key is the estimator name, each value is a dict of the custom search space for that estimator. Notice the
domain of the custom search space can either be a value or a sample.Domain object.
+
+
```python
custom_hp = {
"transformer_ms": {
@@ -2290,6 +2356,7 @@ class AutoML(BaseEstimator):
}
```
            cv_score_agg_func: customized cross-validation scores aggregate function. Default to average metrics across folds. If specified, this function needs to
                have the following signature:
@@ -2323,21 +2390,59 @@
+            skip_transform: boolean, default=False | Whether to skip the data pre-processing step prior to modeling.
            fit_kwargs_by_estimator: dict, default=None | The user specified keywords arguments, grouped by estimator name.
                For TransformersEstimator, available fit_kwargs can be found from
                [TrainingArgumentsForAuto](nlp/huggingface/training_args).
                e.g.,
```python
fit_kwargs_by_estimator = {
"transformer": {
"output_dir": "test/data/output/",
"fp16": False,
+ },
+ "tft": {
+ "max_encoder_length": 1,
+ "min_encoder_length": 1,
+ "static_categoricals": [],
+ "static_reals": [],
+ "time_varying_known_categoricals": [],
+ "time_varying_known_reals": [],
+ "time_varying_unknown_categoricals": [],
+ "time_varying_unknown_reals": [],
+ "variable_groups": {},
+ "lags": {},
}
}
```
**fit_kwargs: Other key word arguments to pass to fit() function of
- the searched learners, such as sample_weight. Include:
- period: int | forecast horizon for ts_forecast tasks.
+ the searched learners, such as sample_weight. Below are a few examples of
+ estimator-specific parameters:
+ period: int | forecast horizon for all time series forecast tasks.
gpu_per_trial: float, default = 0 | A float of the number of gpus per trial,
- only used by TransformersEstimator and XGBoostSklearnEstimator.
+ only used by TransformersEstimator, XGBoostSklearnEstimator, and
+ TemporalFusionTransformerEstimator.
+ group_ids: list of strings of column names identifying a time series, only
+ used by TemporalFusionTransformerEstimator, required for
+ 'ts_forecast_panel' task. `group_ids` is a parameter for TimeSeriesDataSet object
+ from PyTorchForecasting.
+ For other parameters to describe your dataset, refer to
+ [TimeSeriesDataSet PyTorchForecasting](https://pytorch-forecasting.readthedocs.io/en/stable/api/pytorch_forecasting.data.timeseries.TimeSeriesDataSet.html).
+ To specify your variables, use `static_categoricals`, `static_reals`,
+ `time_varying_known_categoricals`, `time_varying_known_reals`,
+ `time_varying_unknown_categoricals`, `time_varying_unknown_reals`,
+ `variable_groups`. To provide more information on your data, use
+ `max_encoder_length`, `min_encoder_length`, `lags`.
+ log_dir: str, default = "lightning_logs" | Folder into which to log results
+ for tensorboard, only used by TemporalFusionTransformerEstimator.
+ max_epochs: int, default = 20 | Maximum number of epochs to run training,
+ only used by TemporalFusionTransformerEstimator.
+ batch_size: int, default = 64 | Batch size for training model, only
+ used by TemporalFusionTransformerEstimator.
"""
self._state._start_time_flag = self._start_time_flag = time.time()
@@ -2450,6 +2555,7 @@ class AutoML(BaseEstimator):
self._state.fit_kwargs = fit_kwargs
custom_hp = custom_hp or self._settings.get("custom_hp")
+ self._skip_transform = self._settings.get("skip_transform") if skip_transform is None else skip_transform
fit_kwargs_by_estimator = fit_kwargs_by_estimator or self._settings.get(
"fit_kwargs_by_estimator"
)
@@ -2605,6 +2711,8 @@ class AutoML(BaseEstimator):
estimator_list = ["lgbm", "xgboost", "xgb_limitdepth"]
elif _is_nlp_task(self._state.task):
estimator_list = ["transformer"]
+ elif self._state.task == TS_FORECASTPANEL:
+ estimator_list = ["tft"]
else:
try:
import catboost
diff --git a/flaml/data.py b/flaml/data.py
index 28960a0e2..9deab1b79 100644
--- a/flaml/data.py
+++ b/flaml/data.py
@@ -32,9 +32,11 @@ TS_FORECASTREGRESSION = (
"ts_forecast_regression",
)
TS_FORECASTCLASSIFICATION = "ts_forecast_classification"
+TS_FORECASTPANEL = "ts_forecast_panel"
TS_FORECAST = (
*TS_FORECASTREGRESSION,
TS_FORECASTCLASSIFICATION,
+ TS_FORECASTPANEL,
)
TS_TIMESTAMP_COL = "ds"
TS_VALUE_COL = "y"
@@ -248,6 +250,26 @@ def concat(X1, X2):
return np.concatenate([X1, X2])
+def add_time_idx_col(X):
+ unique_dates = X[TS_TIMESTAMP_COL].drop_duplicates().sort_values(ascending=True)
+ # assume no missing timestamps
+ freq = pd.infer_freq(unique_dates)
+ if freq == "MS":
+ X["time_idx"] = X[TS_TIMESTAMP_COL].dt.year * 12 + X[TS_TIMESTAMP_COL].dt.month
+ elif freq == "Y":
+ X["time_idx"] = X[TS_TIMESTAMP_COL].dt.year
+ else:
+ # using time frequency to generate all time stamps and then indexing for time_idx
+ # full_range = pd.date_range(X[TS_TIMESTAMP_COL].min(), X[TS_TIMESTAMP_COL].max(), freq=freq).to_list()
+ # X["time_idx"] = [full_range.index(time) for time in X[TS_TIMESTAMP_COL]]
+ # taking minimum difference in timestamp
+ timestamps = unique_dates.view("int64")
+ freq = int(timestamps.diff().mode())
+        X["time_idx"] = (timestamps - timestamps.min()) / freq
+ X["time_idx"] = X["time_idx"].astype("int")
+ return X
+
+
class DataTransformer:
"""Transform input training data."""
@@ -281,6 +303,9 @@ class DataTransformer:
drop = False
if task in TS_FORECAST:
X = X.rename(columns={X.columns[0]: TS_TIMESTAMP_COL})
+ if task is TS_FORECASTPANEL:
+ if "time_idx" not in X:
+ X = add_time_idx_col(X)
ds_col = X.pop(TS_TIMESTAMP_COL)
if isinstance(y, Series):
y = y.rename(TS_VALUE_COL)
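`add_time_idx_col` converts the timestamp column into the integer `time_idx` that PyTorch Forecasting's `TimeSeriesDataSet` expects. A tiny example of the monthly ("MS") branch above:

```python
import pandas as pd
from flaml.data import add_time_idx_col  # added in this PR

df = pd.DataFrame({"ds": pd.date_range("2021-01-01", periods=4, freq="MS")})
# The "MS" branch uses year * 12 + month, which is evenly spaced but not zero-based.
print(add_time_idx_col(df)["time_idx"].tolist())  # [24253, 24254, 24255, 24256]
```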
diff --git a/flaml/default/portfolio.py b/flaml/default/portfolio.py
index b25642bfb..527dae05e 100644
--- a/flaml/default/portfolio.py
+++ b/flaml/default/portfolio.py
@@ -6,6 +6,7 @@ import json
from sklearn.preprocessing import RobustScaler
from flaml.default import greedy
from flaml.default.regret import load_result, build_regret
+from flaml.version import __version__
regret_bound = 0.01
@@ -113,7 +114,6 @@ def serialize(configs, regret, meta_features, output_file, config_path):
)
portfolio = [load_json(config_path.joinpath(m + ".json")) for m in configs]
regret = regret.loc[configs]
- from flaml import __version__
meta_predictor = {
"version": __version__,
diff --git a/flaml/default/suggest.py b/flaml/default/suggest.py
index 50c8503e7..aa22f0e0a 100644
--- a/flaml/default/suggest.py
+++ b/flaml/default/suggest.py
@@ -5,12 +5,17 @@ import pathlib
import json
from flaml.data import CLASSIFICATION, DataTransformer
from flaml.ml import get_estimator_class, get_classification_objective
+from flaml.version import __version__
LOCATION = pathlib.Path(__file__).parent.resolve()
logger = logging.getLogger(__name__)
CONFIG_PREDICTORS = {}
+def version_parse(version):
+ return tuple(map(int, (version.split("."))))
+
+
def meta_feature(task, X_train, y_train, meta_feature_names):
this_feature = []
n_row = X_train.shape[0]
@@ -72,11 +77,14 @@ def suggest_config(task, X, y, estimator_or_predictor, location=None, k=None):
if isinstance(estimator_or_predictor, str)
else estimator_or_predictor
)
- from flaml import __version__
older_version = "1.0.2"
# TODO: update older_version when the newer code can no longer handle the older version json file
- assert __version__ >= predictor["version"] >= older_version
+ assert (
+ version_parse(__version__)
+ >= version_parse(predictor["version"])
+ >= version_parse(older_version)
+ )
prep = predictor["preprocessing"]
feature = meta_feature(
task, X_train=X, y_train=y, meta_feature_names=predictor["meta_feature_names"]
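The switch from comparing raw version strings to comparing parsed tuples matters once any version component reaches two digits, since lexicographic comparison puts "1.0.10" before "1.0.2":

```python
def version_parse(version):
    return tuple(map(int, version.split(".")))

print("1.0.10" >= "1.0.2")                                # False: string comparison is lexicographic
print(version_parse("1.0.10") >= version_parse("1.0.2"))  # True: (1, 0, 10) >= (1, 0, 2)
```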
diff --git a/flaml/ml.py b/flaml/ml.py
index ec9a31063..02226a10e 100644
--- a/flaml/ml.py
+++ b/flaml/ml.py
@@ -37,6 +37,7 @@ from .model import (
ARIMA,
SARIMAX,
TransformersEstimator,
+ TemporalFusionTransformerEstimator,
TransformersEstimatorModelSelection,
)
from .data import CLASSIFICATION, group_counts, TS_FORECAST
@@ -122,6 +123,8 @@ def get_estimator_class(task, estimator_name):
estimator_class = SARIMAX
elif estimator_name == "transformer":
estimator_class = TransformersEstimator
+ elif estimator_name == "tft":
+ estimator_class = TemporalFusionTransformerEstimator
elif estimator_name == "transformer_ms":
estimator_class = TransformersEstimatorModelSelection
else:
@@ -473,7 +476,7 @@ def evaluate_model_CV(
"label_list"
) # pass the label list on to compute the evaluation metric
groups = None
- shuffle = False if task in TS_FORECAST else True
+ shuffle = getattr(kf, "shuffle", task not in TS_FORECAST)
if isinstance(kf, RepeatedStratifiedKFold):
kf = kf.split(X_train_split, y_train_split)
elif isinstance(kf, GroupKFold):
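The one-line change to `shuffle` lets an explicitly configured splitter keep its own setting and only falls back to the task-based default when the splitter has no `shuffle` attribute. For instance:

```python
from sklearn.model_selection import KFold, TimeSeriesSplit
from flaml.data import TS_FORECAST

task = "regression"
for kf in (KFold(n_splits=5, shuffle=False), TimeSeriesSplit(n_splits=5)):
    print(type(kf).__name__, getattr(kf, "shuffle", task not in TS_FORECAST))
# KFold False            <- the splitter's own setting is honored
# TimeSeriesSplit True   <- no shuffle attribute, so the task-based default applies
```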
diff --git a/flaml/model.py b/flaml/model.py
index 0eb6e1b61..4a7825f1a 100644
--- a/flaml/model.py
+++ b/flaml/model.py
@@ -23,6 +23,7 @@ from . import tune
from .data import (
group_counts,
CLASSIFICATION,
+ add_time_idx_col,
TS_FORECASTREGRESSION,
TS_TIMESTAMP_COL,
TS_VALUE_COL,
@@ -31,7 +32,6 @@ from .data import (
TOKENCLASSIFICATION,
SUMMARIZATION,
NLG_TASKS,
- MULTICHOICECLASSIFICATION,
)
try:
@@ -2152,6 +2152,193 @@ class XGBoostLimitDepth_TS(TS_SKLearn):
base_class = XGBoostLimitDepthEstimator
+class TemporalFusionTransformerEstimator(SKLearnEstimator):
+ """The class for tuning Temporal Fusion Transformer"""
+
+ @classmethod
+ def search_space(cls, data_size, pred_horizon, **params):
+ space = {
+ "gradient_clip_val": {
+ "domain": tune.loguniform(lower=0.01, upper=100.0),
+ "init_value": 0.01,
+ },
+ "hidden_size": {
+ "domain": tune.lograndint(lower=8, upper=512),
+ "init_value": 16,
+ },
+ "hidden_continuous_size": {
+ "domain": tune.randint(lower=1, upper=65),
+ "init_value": 8,
+ },
+ "attention_head_size": {
+ "domain": tune.randint(lower=1, upper=5),
+ "init_value": 4,
+ },
+ "dropout": {
+ "domain": tune.uniform(lower=0.1, upper=0.3),
+ "init_value": 0.1,
+ },
+ "learning_rate": {
+ "domain": tune.loguniform(lower=0.00001, upper=1.0),
+ "init_value": 0.001,
+ },
+ }
+ return space
+
+ def transform_ds(self, X_train, y_train, **kwargs):
+ y_train = DataFrame(y_train, columns=[TS_VALUE_COL])
+ self.data = X_train.join(y_train)
+
+ max_prediction_length = kwargs["period"]
+ self.max_encoder_length = kwargs["max_encoder_length"]
+ training_cutoff = self.data["time_idx"].max() - max_prediction_length
+
+ from pytorch_forecasting import TimeSeriesDataSet
+ from pytorch_forecasting.data import GroupNormalizer
+
+ self.group_ids = kwargs["group_ids"].copy()
+ training = TimeSeriesDataSet(
+ self.data[lambda x: x.time_idx <= training_cutoff],
+ time_idx="time_idx",
+ target=TS_VALUE_COL,
+ group_ids=self.group_ids,
+ min_encoder_length=kwargs.get(
+ "min_encoder_length", self.max_encoder_length // 2
+ ), # keep encoder length long (as it is in the validation set)
+ max_encoder_length=self.max_encoder_length,
+ min_prediction_length=1,
+ max_prediction_length=max_prediction_length,
+ static_categoricals=kwargs.get("static_categoricals", []),
+ static_reals=kwargs.get("static_reals", []),
+ time_varying_known_categoricals=kwargs.get(
+ "time_varying_known_categoricals", []
+ ),
+ time_varying_known_reals=kwargs.get("time_varying_known_reals", []),
+ time_varying_unknown_categoricals=kwargs.get(
+ "time_varying_unknown_categoricals", []
+ ),
+ time_varying_unknown_reals=kwargs.get("time_varying_unknown_reals", []),
+ variable_groups=kwargs.get(
+ "variable_groups", {}
+ ), # group of categorical variables can be treated as one variable
+ lags=kwargs.get("lags", {}),
+ target_normalizer=GroupNormalizer(
+ groups=kwargs["group_ids"], transformation="softplus"
+ ), # use softplus and normalize by group
+ add_relative_time_idx=True,
+ add_target_scales=True,
+ add_encoder_length=True,
+ )
+
+ # create validation set (predict=True) which means to predict the last max_prediction_length points in time
+ # for each series
+ validation = TimeSeriesDataSet.from_dataset(
+ training, self.data, predict=True, stop_randomization=True
+ )
+
+ # create dataloaders for model
+ batch_size = kwargs.get("batch_size", 64)
+ train_dataloader = training.to_dataloader(
+ train=True, batch_size=batch_size, num_workers=0
+ )
+ val_dataloader = validation.to_dataloader(
+ train=False, batch_size=batch_size * 10, num_workers=0
+ )
+
+ return training, train_dataloader, val_dataloader
+
+ def fit(self, X_train, y_train, budget=None, **kwargs):
+ import copy
+ from pathlib import Path
+ import warnings
+ import numpy as np
+ import pandas as pd
+ import pytorch_lightning as pl
+ from pytorch_lightning.callbacks import EarlyStopping, LearningRateMonitor
+ from pytorch_lightning.loggers import TensorBoardLogger
+ import torch
+ from pytorch_forecasting import TemporalFusionTransformer
+ from pytorch_forecasting.metrics import QuantileLoss
+ import tensorboard as tb
+
+ warnings.filterwarnings("ignore")
+ current_time = time.time()
+ training, train_dataloader, val_dataloader = self.transform_ds(
+ X_train, y_train, **kwargs
+ )
+ params = self.params.copy()
+ gradient_clip_val = params.pop("gradient_clip_val")
+ params.pop("n_jobs")
+ max_epochs = kwargs.get("max_epochs", 20)
+ early_stop_callback = EarlyStopping(
+ monitor="val_loss", min_delta=1e-4, patience=10, verbose=False, mode="min"
+ )
+ lr_logger = LearningRateMonitor() # log the learning rate
+ logger = TensorBoardLogger(
+ kwargs.get("log_dir", "lightning_logs")
+ ) # logging results to a tensorboard
+ default_trainer_kwargs = dict(
+ gpus=self._kwargs.get("gpu_per_trial", [0])
+ if torch.cuda.is_available()
+ else None,
+ max_epochs=max_epochs,
+ gradient_clip_val=gradient_clip_val,
+ callbacks=[lr_logger, early_stop_callback],
+ logger=logger,
+ )
+ trainer = pl.Trainer(
+ **default_trainer_kwargs,
+ )
+ tft = TemporalFusionTransformer.from_dataset(
+ training,
+ **params,
+ lstm_layers=2, # 2 is mostly optimal according to documentation
+ output_size=7, # 7 quantiles by default
+ loss=QuantileLoss(),
+            log_interval=10,  # log every 10 batches
+ reduce_on_plateau_patience=4,
+ )
+ # fit network
+ trainer.fit(
+ tft,
+ train_dataloaders=train_dataloader,
+ val_dataloaders=val_dataloader,
+ )
+ best_model_path = trainer.checkpoint_callback.best_model_path
+ best_tft = TemporalFusionTransformer.load_from_checkpoint(best_model_path)
+ train_time = time.time() - current_time
+ self._model = best_tft
+ return train_time
+
+ def predict(self, X):
+ import pandas as pd
+
+ ids = self.group_ids.copy()
+ ids.append(TS_TIMESTAMP_COL)
+ encoder_data = self.data[
+ lambda x: x.time_idx > x.time_idx.max() - self.max_encoder_length
+ ]
+        # following the pytorch-forecasting example, fill the decoder targets with the last observed values
+ last_data_cols = self.group_ids.copy()
+ last_data_cols.append(TS_VALUE_COL)
+ last_data = self.data[lambda x: x.time_idx == x.time_idx.max()][last_data_cols]
+ decoder_data = X
+ if "time_idx" not in decoder_data:
+ decoder_data = add_time_idx_col(decoder_data)
+ decoder_data["time_idx"] += (
+ encoder_data["time_idx"].max() + 1 - decoder_data["time_idx"].min()
+ )
+ # decoder_data[TS_VALUE_COL] = 0
+ decoder_data = decoder_data.merge(last_data, how="inner", on=self.group_ids)
+ decoder_data = decoder_data.sort_values(ids)
+ new_prediction_data = pd.concat([encoder_data, decoder_data], ignore_index=True)
+ new_prediction_data["time_idx"] = new_prediction_data["time_idx"].astype("int")
+ new_raw_predictions = self._model.predict(new_prediction_data)
+ index = [decoder_data[idx].to_numpy() for idx in ids]
+ predictions = pd.Series(new_raw_predictions.numpy().ravel(), index=index)
+ return predictions
+
+
class suppress_stdout_stderr(object):
def __init__(self):
# Open a pair of null files
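For prediction, the new `TemporalFusionTransformerEstimator` expects `X` to carry the future timestamps plus the `group_ids` columns, one row per series per forecast step; the decoder targets are then filled from the last observed values, as the `predict` method above does. A hedged sketch of building such a frame (column names reuse the toy panel from the earlier sketch and are not from the PR):

```python
import pandas as pd

# Six future monthly steps for each of two series identified by "store".
future_dates = pd.date_range("2021-01-01", periods=6, freq="MS")
X_future = pd.concat(
    [pd.DataFrame({"ds": future_dates, "store": store}) for store in ("A", "B")],
    ignore_index=True,
)
# predictions = automl.predict(X_future)
# -> a pandas Series of point forecasts indexed by the group_ids columns plus the timestamp.
```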
diff --git a/flaml/tune/__init__.py b/flaml/tune/__init__.py
index 7c441c32a..3d6c89101 100644
--- a/flaml/tune/__init__.py
+++ b/flaml/tune/__init__.py
@@ -5,7 +5,6 @@ try:
from ray.tune import (
uniform,
quniform,
- choice,
randint,
qrandint,
randn,
@@ -14,12 +13,12 @@ try:
qloguniform,
lograndint,
qlograndint,
+ sample,
)
except (ImportError, AssertionError):
from .sample import (
uniform,
quniform,
- choice,
randint,
qrandint,
randn,
@@ -29,7 +28,9 @@ except (ImportError, AssertionError):
lograndint,
qlograndint,
)
+ from . import sample
from .tune import run, report, INCUMBENT_RESULT
from .sample import polynomial_expansion_set
from .sample import PolynomialExpansionSet, Categorical, Float
from .trial import Trial
+from .utils import choice
diff --git a/flaml/tune/space.py b/flaml/tune/space.py
index a6b4a4861..7e2bf4de3 100644
--- a/flaml/tune/space.py
+++ b/flaml/tune/space.py
@@ -225,15 +225,18 @@ def add_cost_to_space(space: Dict, low_cost_point: Dict, choice_cost: Dict):
domain.choice_cost = cost[ind]
domain.const = [domain.const[i] for i in ind]
domain.ordered = True
- elif all(
- isinstance(x, int) or isinstance(x, float) for x in domain.categories
- ):
- # sort the choices by value
- ind = np.argsort(domain.categories)
- domain.categories = [domain.categories[i] for i in ind]
- domain.ordered = True
else:
- domain.ordered = False
+ ordered = getattr(domain, "ordered", None)
+ if ordered is None:
+ # automatically decide whether to order the choices based on the value type
+ domain.ordered = ordered = all(
+ isinstance(x, (int, float)) for x in domain.categories
+ )
+ if ordered:
+ # sort the choices by value
+ ind = np.argsort(domain.categories)
+ domain.categories = [domain.categories[i] for i in ind]
+
if low_cost and low_cost not in domain.categories:
assert isinstance(
low_cost, list
diff --git a/flaml/tune/utils.py b/flaml/tune/utils.py
new file mode 100644
index 000000000..53dfba3a7
--- /dev/null
+++ b/flaml/tune/utils.py
@@ -0,0 +1,28 @@
+from typing import Sequence
+
+try:
+ from ray import __version__ as ray_version
+
+ assert ray_version >= "1.10.0"
+ from ray.tune import sample
+except (ImportError, AssertionError):
+ from . import sample
+
+
+def choice(categories: Sequence, order=None):
+ """Sample a categorical value.
+ Sampling from ``tune.choice([1, 2])`` is equivalent to sampling from
+ ``np.random.choice([1, 2])``
+
+ Args:
+ categories (Sequence): Sequence of categories to sample from.
+        order (bool): Whether the categories have an order. If None, will be decided automatically:
+ Numerical categories have an order, while string categories do not.
+ """
+ domain = sample.Categorical(categories).uniform()
+ domain.ordered = (
+ order
+ if order is not None
+ else all(isinstance(x, (int, float)) for x in categories)
+ )
+ return domain
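The new `order` argument makes the ordering of a categorical domain explicit instead of always being inferred from the value type, and `add_cost_to_space` (see the space.py hunk above) now respects whatever `ordered` says. For example:

```python
from flaml import tune

batch_sizes = tune.choice([16, 32, 64])       # numeric values: ordered is inferred as True
activation = tune.choice(["relu", "tanh"])    # strings: ordered is inferred as False
seeds = tune.choice([1, 7, 42], order=False)  # explicit override: keep numeric values unordered

print(batch_sizes.ordered, activation.ordered, seeds.ordered)  # True False False
```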
diff --git a/flaml/version.py b/flaml/version.py
index 39e0411d5..9fd0f8dd6 100644
--- a/flaml/version.py
+++ b/flaml/version.py
@@ -1 +1 @@
-__version__ = "1.0.9"
+__version__ = "1.0.10"
diff --git a/notebook/automl_time_series_forecast.ipynb b/notebook/automl_time_series_forecast.ipynb
index 81ba9e536..719fdb6ee 100644
--- a/notebook/automl_time_series_forecast.ipynb
+++ b/notebook/automl_time_series_forecast.ipynb
@@ -26,110 +26,9 @@
},
{
"cell_type": "code",
- "execution_count": 1,
+ "execution_count": null,
"metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Requirement already satisfied: flaml[notebook,ts_forecast] in c:\\users\\pythonprojects\\flaml (0.9.2)\n",
- "Requirement already satisfied: NumPy>=1.16.2 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from flaml[notebook,ts_forecast]) (1.18.5)\n",
- "Requirement already satisfied: lightgbm>=2.3.1 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from flaml[notebook,ts_forecast]) (3.2.1)\n",
- "Requirement already satisfied: xgboost<=1.3.3,>=0.90 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from flaml[notebook,ts_forecast]) (1.2.1)\n",
- "Requirement already satisfied: scipy>=1.4.1 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from flaml[notebook,ts_forecast]) (1.5.2)\n",
- "Requirement already satisfied: pandas>=1.1.4 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from flaml[notebook,ts_forecast]) (1.3.4)\n",
- "Requirement already satisfied: scikit-learn>=0.24 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from flaml[notebook,ts_forecast]) (1.0.1)\n",
- "Requirement already satisfied: prophet>=1.0.1 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from flaml[notebook,ts_forecast]) (1.0.1)\n",
- "Requirement already satisfied: statsmodels>=0.12.2 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from flaml[notebook,ts_forecast]) (0.13.1)\n",
- "Requirement already satisfied: openml==0.10.2 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from flaml[notebook,ts_forecast]) (0.10.2)\n",
- "Requirement already satisfied: jupyter in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from flaml[notebook,ts_forecast]) (1.0.0)\n",
- "Requirement already satisfied: matplotlib in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from flaml[notebook,ts_forecast]) (3.2.0)\n",
- "Requirement already satisfied: rgf-python in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from flaml[notebook,ts_forecast]) (3.10.0)\n",
- "Requirement already satisfied: catboost>=0.26 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from flaml[notebook,ts_forecast]) (0.26)\n",
- "Requirement already satisfied: xmltodict in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from openml==0.10.2->flaml[notebook,ts_forecast]) (0.12.0)\n",
- "Requirement already satisfied: python-dateutil in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from openml==0.10.2->flaml[notebook,ts_forecast]) (2.8.1)\n",
- "Requirement already satisfied: liac-arff>=2.4.0 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from openml==0.10.2->flaml[notebook,ts_forecast]) (2.5.0)\n",
- "Requirement already satisfied: requests in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from openml==0.10.2->flaml[notebook,ts_forecast]) (2.25.1)\n",
- "Requirement already satisfied: six in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from catboost>=0.26->flaml[notebook,ts_forecast]) (1.15.0)\n",
- "Requirement already satisfied: graphviz in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from catboost>=0.26->flaml[notebook,ts_forecast]) (0.16)\n",
- "Requirement already satisfied: plotly in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from catboost>=0.26->flaml[notebook,ts_forecast]) (3.10.0)\n",
- "Requirement already satisfied: wheel in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from lightgbm>=2.3.1->flaml[notebook,ts_forecast]) (0.36.2)\n",
- "Requirement already satisfied: pytz>=2017.3 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from pandas>=1.1.4->flaml[notebook,ts_forecast]) (2021.1)\n",
- "Requirement already satisfied: Cython>=0.22 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from prophet>=1.0.1->flaml[notebook,ts_forecast]) (0.29.14)\n",
- "Requirement already satisfied: setuptools-git>=1.2 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from prophet>=1.0.1->flaml[notebook,ts_forecast]) (1.2)\n",
- "Requirement already satisfied: tqdm>=4.36.1 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from prophet>=1.0.1->flaml[notebook,ts_forecast]) (4.49.0)\n",
- "Requirement already satisfied: pystan~=2.19.1.1 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from prophet>=1.0.1->flaml[notebook,ts_forecast]) (2.19.1.1)\n",
- "Requirement already satisfied: LunarCalendar>=0.0.9 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from prophet>=1.0.1->flaml[notebook,ts_forecast]) (0.0.9)\n",
- "Requirement already satisfied: convertdate>=2.1.2 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from prophet>=1.0.1->flaml[notebook,ts_forecast]) (2.3.2)\n",
- "Requirement already satisfied: holidays>=0.10.2 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from prophet>=1.0.1->flaml[notebook,ts_forecast]) (0.11.2)\n",
- "Requirement already satisfied: cmdstanpy==0.9.68 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from prophet>=1.0.1->flaml[notebook,ts_forecast]) (0.9.68)\n",
- "Requirement already satisfied: ujson in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from cmdstanpy==0.9.68->prophet>=1.0.1->flaml[notebook,ts_forecast]) (4.0.2)\n",
- "Requirement already satisfied: pymeeus<=1,>=0.3.13 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from convertdate>=2.1.2->prophet>=1.0.1->flaml[notebook,ts_forecast]) (0.5.11)\n",
- "Requirement already satisfied: korean-lunar-calendar in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from holidays>=0.10.2->prophet>=1.0.1->flaml[notebook,ts_forecast]) (0.2.1)\n",
- "Requirement already satisfied: hijri-converter in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from holidays>=0.10.2->prophet>=1.0.1->flaml[notebook,ts_forecast]) (2.2.0)\n",
- "Requirement already satisfied: ephem>=3.7.5.3 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from LunarCalendar>=0.0.9->prophet>=1.0.1->flaml[notebook,ts_forecast]) (4.0.0.2)\n",
- "Requirement already satisfied: cycler>=0.10 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from matplotlib->flaml[notebook,ts_forecast]) (0.10.0)\n",
- "Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from matplotlib->flaml[notebook,ts_forecast]) (2.4.7)\n",
- "Requirement already satisfied: kiwisolver>=1.0.1 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from matplotlib->flaml[notebook,ts_forecast]) (1.3.1)\n",
- "Requirement already satisfied: joblib>=0.11 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from scikit-learn>=0.24->flaml[notebook,ts_forecast]) (0.14.1)\n",
- "Requirement already satisfied: threadpoolctl>=2.0.0 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from scikit-learn>=0.24->flaml[notebook,ts_forecast]) (2.2.0)\n",
- "Requirement already satisfied: patsy>=0.5.2 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from statsmodels>=0.12.2->flaml[notebook,ts_forecast]) (0.5.2)\n",
- "Requirement already satisfied: notebook in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from jupyter->flaml[notebook,ts_forecast]) (5.4.1)\n",
- "Requirement already satisfied: qtconsole in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from jupyter->flaml[notebook,ts_forecast]) (5.1.1)\n",
- "Requirement already satisfied: ipywidgets in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from jupyter->flaml[notebook,ts_forecast]) (7.2.1)\n",
- "Requirement already satisfied: nbconvert in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from jupyter->flaml[notebook,ts_forecast]) (6.1.0)\n",
- "Requirement already satisfied: ipykernel in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from jupyter->flaml[notebook,ts_forecast]) (5.3.4)\n",
- "Requirement already satisfied: jupyter-console in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from jupyter->flaml[notebook,ts_forecast]) (6.0.0)\n",
- "Requirement already satisfied: ipython>=5.0.0 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from ipykernel->jupyter->flaml[notebook,ts_forecast]) (7.18.1)\n",
- "Requirement already satisfied: traitlets>=4.1.0 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from ipykernel->jupyter->flaml[notebook,ts_forecast]) (5.0.5)\n",
- "Requirement already satisfied: jupyter-client in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from ipykernel->jupyter->flaml[notebook,ts_forecast]) (6.1.7)\n",
- "Requirement already satisfied: tornado>=4.2 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from ipykernel->jupyter->flaml[notebook,ts_forecast]) (6.0.4)\n",
- "Requirement already satisfied: pygments in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from ipython>=5.0.0->ipykernel->jupyter->flaml[notebook,ts_forecast]) (2.9.0)\n",
- "Requirement already satisfied: decorator in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from ipython>=5.0.0->ipykernel->jupyter->flaml[notebook,ts_forecast]) (5.0.9)\n",
- "Requirement already satisfied: prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from ipython>=5.0.0->ipykernel->jupyter->flaml[notebook,ts_forecast]) (2.0.10)\n",
- "Requirement already satisfied: jedi>=0.10 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from ipython>=5.0.0->ipykernel->jupyter->flaml[notebook,ts_forecast]) (0.18.0)\n",
- "Requirement already satisfied: backcall in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from ipython>=5.0.0->ipykernel->jupyter->flaml[notebook,ts_forecast]) (0.2.0)\n",
- "Requirement already satisfied: colorama in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from ipython>=5.0.0->ipykernel->jupyter->flaml[notebook,ts_forecast]) (0.4.4)\n",
- "Requirement already satisfied: setuptools>=18.5 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from ipython>=5.0.0->ipykernel->jupyter->flaml[notebook,ts_forecast]) (52.0.0.post20210125)\n",
- "Requirement already satisfied: pickleshare in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from ipython>=5.0.0->ipykernel->jupyter->flaml[notebook,ts_forecast]) (0.7.5)\n",
- "Requirement already satisfied: parso<0.9.0,>=0.8.0 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from jedi>=0.10->ipython>=5.0.0->ipykernel->jupyter->flaml[notebook,ts_forecast]) (0.8.2)\n",
- "Requirement already satisfied: wcwidth in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0->ipython>=5.0.0->ipykernel->jupyter->flaml[notebook,ts_forecast]) (0.2.5)\n",
- "Requirement already satisfied: ipython-genutils in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from traitlets>=4.1.0->ipykernel->jupyter->flaml[notebook,ts_forecast]) (0.2.0)\n",
- "Requirement already satisfied: widgetsnbextension~=3.2.0 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from ipywidgets->jupyter->flaml[notebook,ts_forecast]) (3.2.1)\n",
- "Requirement already satisfied: nbformat>=4.2.0 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from ipywidgets->jupyter->flaml[notebook,ts_forecast]) (5.1.3)\n",
- "Requirement already satisfied: jupyter-core in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from nbformat>=4.2.0->ipywidgets->jupyter->flaml[notebook,ts_forecast]) (4.7.1)\n",
- "Requirement already satisfied: jsonschema!=2.5.0,>=2.4 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from nbformat>=4.2.0->ipywidgets->jupyter->flaml[notebook,ts_forecast]) (3.2.0)\n",
- "Requirement already satisfied: pyrsistent>=0.14.0 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from jsonschema!=2.5.0,>=2.4->nbformat>=4.2.0->ipywidgets->jupyter->flaml[notebook,ts_forecast]) (0.18.0)\n",
- "Requirement already satisfied: attrs>=17.4.0 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from jsonschema!=2.5.0,>=2.4->nbformat>=4.2.0->ipywidgets->jupyter->flaml[notebook,ts_forecast]) (21.2.0)\n",
- "Requirement already satisfied: terminado>=0.8.1 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from notebook->jupyter->flaml[notebook,ts_forecast]) (0.10.1)\n",
- "Requirement already satisfied: Send2Trash in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from notebook->jupyter->flaml[notebook,ts_forecast]) (1.7.1)\n",
- "Requirement already satisfied: jinja2 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from notebook->jupyter->flaml[notebook,ts_forecast]) (3.0.2)\n",
- "Requirement already satisfied: pyzmq>=13 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from jupyter-client->ipykernel->jupyter->flaml[notebook,ts_forecast]) (22.0.3)\n",
- "Requirement already satisfied: pywin32>=1.0 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from jupyter-core->nbformat>=4.2.0->ipywidgets->jupyter->flaml[notebook,ts_forecast]) (227)\n",
- "Requirement already satisfied: pywinpty>=1.1.0 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from terminado>=0.8.1->notebook->jupyter->flaml[notebook,ts_forecast]) (1.1.3)\n",
- "Requirement already satisfied: MarkupSafe>=2.0 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from jinja2->notebook->jupyter->flaml[notebook,ts_forecast]) (2.0.1)\n",
- "Requirement already satisfied: entrypoints>=0.2.2 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from nbconvert->jupyter->flaml[notebook,ts_forecast]) (0.3)\n",
- "Requirement already satisfied: bleach in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from nbconvert->jupyter->flaml[notebook,ts_forecast]) (3.3.0)\n",
- "Requirement already satisfied: nbclient<0.6.0,>=0.5.0 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from nbconvert->jupyter->flaml[notebook,ts_forecast]) (0.5.3)\n",
- "Requirement already satisfied: testpath in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from nbconvert->jupyter->flaml[notebook,ts_forecast]) (0.5.0)\n",
- "Requirement already satisfied: jupyterlab-pygments in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from nbconvert->jupyter->flaml[notebook,ts_forecast]) (0.1.2)\n",
- "Requirement already satisfied: pandocfilters>=1.4.1 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from nbconvert->jupyter->flaml[notebook,ts_forecast]) (1.4.3)\n",
- "Requirement already satisfied: defusedxml in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from nbconvert->jupyter->flaml[notebook,ts_forecast]) (0.7.1)\n",
- "Requirement already satisfied: mistune<2,>=0.8.1 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from nbconvert->jupyter->flaml[notebook,ts_forecast]) (0.8.4)\n",
- "Requirement already satisfied: async-generator in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from nbclient<0.6.0,>=0.5.0->nbconvert->jupyter->flaml[notebook,ts_forecast]) (1.10)\n",
- "Requirement already satisfied: nest-asyncio in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from nbclient<0.6.0,>=0.5.0->nbconvert->jupyter->flaml[notebook,ts_forecast]) (1.5.1)\n",
- "Requirement already satisfied: packaging in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from bleach->nbconvert->jupyter->flaml[notebook,ts_forecast]) (21.0)\n",
- "Requirement already satisfied: webencodings in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from bleach->nbconvert->jupyter->flaml[notebook,ts_forecast]) (0.5.1)\n",
- "Requirement already satisfied: retrying>=1.3.3 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from plotly->catboost>=0.26->flaml[notebook,ts_forecast]) (1.3.3)\n",
- "Requirement already satisfied: qtpy in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from qtconsole->jupyter->flaml[notebook,ts_forecast]) (1.9.0)\n",
- "Requirement already satisfied: chardet<5,>=3.0.2 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from requests->openml==0.10.2->flaml[notebook,ts_forecast]) (4.0.0)\n",
- "Requirement already satisfied: urllib3<1.27,>=1.21.1 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from requests->openml==0.10.2->flaml[notebook,ts_forecast]) (1.25.11)\n",
- "Requirement already satisfied: idna<3,>=2.5 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from requests->openml==0.10.2->flaml[notebook,ts_forecast]) (2.10)\n",
- "Requirement already satisfied: certifi>=2017.4.17 in c:\\users\\kevin chen\\anaconda3\\envs\\python38\\lib\\site-packages (from requests->openml==0.10.2->flaml[notebook,ts_forecast]) (2021.5.30)\n"
- ]
- }
- ],
+ "outputs": [],
"source": [
"%pip install flaml[notebook,ts_forecast]\n",
"# avoid version 1.0.2 to 1.0.5 for this notebook due to a bug for arima and sarimax's init config"
@@ -176,6 +75,35 @@
"y_test = data[split_idx:]['co2'] # y_test is a series of the values corresponding to the dates for prediction"
]
},
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYUAAAEGCAYAAACKB4k+AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjAsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8GearUAAAgAElEQVR4nOy9eXhkd3nn+/mVpNpVpdK+dku9ubvdbrdNewFjFtvEJjCQwIR4huRmmQs3CbmZkNnCwGQCE0/IPjNZyMNkgdwnCZgMzBATAo6xjcHY7W73vi/a1ypJVapFVaWq+t0/zlLnqOR22+7qVrfez/Pocen3q3N0jjHnPe/2fZXWGkEQBEEA8FzvCxAEQRDWD2IUBEEQBBsxCoIgCIKNGAVBEATBRoyCIAiCYNN4vS/gjdDe3q4HBwev92UIgiDcUBw6dCihte5Ya++GNgqDg4McPHjwel+GIAjCDYVSavSV9iR8JAiCINiIURAEQRBsxCgIgiAINmIUBEEQBBsxCoIgCIKNGAVBEATBRoyCIAiCYCNGQRAEYR1TKJX5mxfH+PujU9fk793QzWuCIAg3O3/w5Hn+9NmLANw91EpXxF/XvyeegiAIwnUmUyiRLZTW3Ht5bNH+PJVcrvu1iFEQBEG4zvybx49w63/+FhfmMjV7l+IZbuuLAjC7lK/7tdTNKCil/EqpA0qpo0qpk0qpT5vrX1ZKHTF/RpRSRxzHfEIpdUEpdVYp9XC9rk0QBGE98e1TswB85eC4az2ZK5LIFHnLtjYAZpcKdb+WenoKBeABrfXtwD7gEaXUvVrrH9da79Na7wP+F/BVAKXUbuBR4FbgEeBPlFINdbw+QRCE606pXMGjFAD/dHrWtWd5DncPttLUoJi5kT0FbWD5Qk3mj7b2lVIK+BDwt+bS+4Evaa0LWuth4AJwd72uTxAEYT0wncpTrmj6WgJcjGdZLpbtPcsobO9sprPZf2OHjwCUUg1meGgOeFJr/aJj+35gVmt93vy9D3D6ThPm2upzflQpdVApdTAej9fr0gVBEK4J44s5AN62ox2AsYWcvXdhLoOv0UNfLEBnxGcbheViGa117cmuAnU1Clrrshkm6gfuVkrtcWz/C6peAoBa6xRrnPPzWuv9Wuv9HR1rzogQBEG4YTg3kwbgHbd0AjA6n7X3zs6mGWoP0eBRdEf8zKQMo/Ajf/x9fvFvD9fleq5J9ZHWOgk8g5ErQCnVCHwA+LLjaxPAgOP3fuDadGsIgiDUmXOzad78m09xamrJtX5wdJHeqJ97hlqBqqcwksjy/QsJ7t9ueBBdET9zSwW01owv5ugI++pynfWsPupQSrWYnwPAQ8AZc/sh4IzWesJxyNeBR5VSPqXUELAdOFCv6xMEQbiWfOXgONOpPJ/9xzOu9UOji9y5OUZL0EvY18ik2Yvw1Jk5Khp+5r4hwDAK6UKJicVlcsUym1qDdbnOenY09wBfNCuIPMDjWusnzL1HcYeO0FqfVEo9DpwCSsDHtNZlBEEQbgIOjhpNaCcnU/baZHKZ6VSe/ZtjALSFvSxki4CRT2gJNtETNTqYu6M+8zwLAAzcaEZBa30MuOMV9n76FdYfAx6r1zUJgiBcD7TWnDVzB/PZIqncCtFgE4dMQ7F/0AgdtYaqRuFiPMO2jjDKLFftajaMw8ER45iB1kBdrlU6mgVBEOpMPF0gVyzb+YFhM5l8ciqFt8HDzu5mANpCXuYzplGYy7CtM2yfoyu6yijE6uMpiFEQBEGoM5cShhF4YKdRYXQpbvQfjCZyDLQGaGwwHsWtIS/z2QLp/Arz2SKD7SH7HJYQ3tnZNK0hLyFffQI9YhQEQRDqzIhpFO7fbpTRW8J2I/NZNrdVH/ytIR8L2SITi8a+0xsI+xoJm4ZgIFaf0BGIURAEQag7w/NZvA0ehtpDRPyNzKWN0tKxhRyb26oP/raQl5Wy5vS0Ubbav+rh3xUxks39dUoygxgFQRCEq8Zkcpnf+dYZyhV33+1IIsumtiANHkWn2W8Qzxh5hs2OB3xryAvAsQmjQml1hZG/yZCDq1c+AcQoCIIgXDV+7HPP88dPX6xpUBtOZBk0w0SdzT7imQJj80aT2mZH3qA1bBiFI+NJgt4GYsEm13msczx8a1fd7kGMgiAIwlWgUtFMmTIUww6pinJFMzqfY6jdeLvvbPYxl84zYhkFhzfQHjLCQ8cmkgzEgnY5qsWn338r3/7427hjU6xu9yHjOAVBEK4CC7mi/dmqLgKjCa1QqrC7NwJgh49G57N4FPQ7QkGWp1DRtfkEgPawj/Y6yVtYiKcgCIJwFXDKWl+KVz2FoxNJAG7rawGgI+yjUKpwYjJFb0sAb2P1Mdxm5hSgfh3Lr4YYBUEQhNfAX784yp7//C1yRfdM5TlzKlqzv5FLiaqncGIyRdjXyBYzd9BpVhC9NLJo5wgsrEQyrO0pXAvEKAiCILwGPv33p8gUSvz9UbeI81za8BTevKWN4XjWnncwsbjM5rYgHo+RH+hoNoxCplBylaOuZktH6BX36okYBUEQhCskVyzZD/vnzidce9b85Hu2tJEtlu3fp5LL9ESrb/2dpoYRcFmjcN+29qt23a8FSTQLgiBcIS9cmmelbBgFS+LaYjiRpbPZZ+sYXYpn6I76mUouc7c5KwGq4SPA1c1s8bkP30lZa3yN12dEvRgFQRCEK+S75xL4mzw8fGs3P7g479p7aWSBN22O2bmA6VSeTKHEUr5Eb0vVU2h2aBbt6YvW/I1339ZTp6u/MiR8JAiCsIoTkym+fXKmZv3w2CJ3DMTY0h5mLl0gv2KMfJlOLTOxuMxdg612yWg8U2Da9CasmQgASikaPIr+WIC+luuTTL4c4ikIgiA4KJUrvPcPvwfA4f/0LmJmmWipXOHMTJqfvHezyxsYag/xkilnfddgKyFfI0FvA/F0wQ4xrX74H/rUQ65Ko/WEeAqCIAgOjjkmoz1xrFphNJzI2k1o3eabv9WbcHBkgZC3gV09Rj6ho9lHPF1g2uxw7lllFFqC3nVrFMRTEARBcDCbqjahXZjL1Hze0dVMY4NRXmoNxDkwvMCdm2P2XISOsI9EpsBUchmPgq7m+nYhX03EUxAEQXBgvf13NPtcFUaWntFge8jOGyQyBVLLK5ydTXPXYLXCyPIUppJ5uiJ+21jcCNw4VyoIgnANmE0XaGpQ7OmNMJmseg2jiRztYR9hXyOxoBePgvlMgSPjSbSG/ZurInXtYUMJdSq57Ko8uhEQoyAIguBgbqlAR9hHfyxoT0gDw1OwlE4bPIrWkJd4psjYgqF2utUxTzkW8pJaXmEimXNVHt0IiFEQBGFD8tz5ON89F69Zn0vn6Yz46W0JkFpeIVswNI5GEu7RmW0hH/OZArOpPA0e5VIvbQ02oTWMLyyvy7LTyyGJZkEQNhzFUoWf/PMDAIx89j2uvcnFZXZ0NdNmylgvZIsoBXPpAkOOgThtYS/z2SIzS3k6wj4aPNXZBzGH2ql4CoIgCOuc71+s6hbNOSS
vl/IrXEpkubU3QixoPNgXc0VGEuZAHIdWUbtZYTS7lKdr1YPfOhaQnIIgCMJ659xM2v58cHTR/nzCnI28d6DFHoW5mFth1Ko8alvlKWSKzKTydEfcJaetITEKNSil/EqpA0qpo0qpk0qpTzv2/l+l1Flz/bcd659QSl0w9x6u17UJgrCxGU5k8TcZj79RcywmVBvXbuuL0mIahWSu6CpHtWgP+8gUSlxKZF0qqIB9LFy/YTmvl3rmFArAA1rrjFKqCfieUuqbQAB4P7BXa11QSnUCKKV2A48CtwK9wD8ppXZorct1vEZBEG5ivnl8ms6Inzdtds80Hk5k2dMb5UI8w2TSYRQmkgy0BmgNeamYEtnJ3IqrHNWi3cw5lCvaVXkEbk8hGmjiRqJunoI2sNoBm8wfDfw88FmtdcH83pz5nfcDX9JaF7TWw8AF4O56XZ8gCDc3Wmt+/q9f5oOfe75mbziRZbA9RG80wJSjF+HoeIq9/cbYzJaAFT4quspRLdpC1ZDRtg63UQg0NdAe9vLxh3Zctfu5VtQ1p6CUalBKHQHmgCe11i8CO4D7lVIvKqWeVUrdZX69Dxh3HD5hrq0+50eVUgeVUgfj8dpyMkEQNhYvXponni7UrFv9A4BrdGa2ULIrifpiASYXjV6E+YwhYLfXlLNubPDQ7G8kmVupKUcF7OokgG2rPAWlFAc/9S7+9UPb3/gNXmPqahS01mWt9T6gH7hbKbUHI2QVA+4F/h3wuFJKAWqtU6xxzs9rrfdrrfd3dHTU8eoFQVgvPH1mzu4XcDKXzvPjn3+Bf/k/X6jZO+4Qtjs2Uf08YuYHhtpD9LUE7AY1K59geQpg5AamksvMpQsMrpqS1uHQM2p3GIgbnWtSfaS1TgLPAI9geABfNcNLB4AK0G6uDzgO6wemEARhQ3NiMsXPfOElfvObp2v2vn7EeEScn8swn3F7C06j4PQkhhPVSqKOZh/pQon8SpnjEymUgj19Efu73RE/h8eTQG0VUV9LgF97726+/6sPYLzX3hzUs/qoQynVYn4OAA8BZ4D/DTxgru8AvEAC+DrwqFLKp5QaArYDB+p1fYIg3Bi8cMmYcOasErJwPvhXj8c8MZmyu4mdBuPMdBqPgsH2IG2haoPacCJLbzRAs99RORQL2gale1UvglKKn33r0A3Xsfxq1LP6qAf4olKqAcP4PK61fkIp5QX+Qil1AigCP6WNSdgnlVKPA6eAEvAxqTwSBOGlkQWANcNHI4ksIW8D2WKZ+WzRXtdac2JyiXfv6ebLB8dZcOw9dWaO/ZtbCXobaTOlKeYzRVO8zv3g73eUk64uO71ZqZtR0FofA+5YY70I/MQrHPMY8Fi9rkkQhBuPCTMRfH42g9baDtVorRlOZLlzc4znzidYyFQf/PG0IWm9qydCa9BLwjQKC9kip6eX+A+P7ASqpaOJbIGp1DJ3bnKXrm5yGIXuyI0lV/F6kY5mQRDWNdb0snShRLZYDR4s5lZYypfsHgSnNzC+aISaNrUGaQ15bYNhVRpt7TAqiawEcTxdYCaVr/EGtnRUK44C3vU5Ke1qI4J4giCsW/IrZRayRYbaQwwnsixkinYDmVVFdFtflKYG5QofjS8YD/+B1gBtYa9tMKy8g5U0tsJH52bSrJQ1favCR3cMtPBLD24nt0bo6mZFjIIgCOsWq1z01t4Iw4ks89kCm8zS0JFEVXqiNeRlIVtNJo+bPQr9sSBtIR+nZ5Zc57OMQsjbgK/RY5esrvYUlFL8yrtuvAa0N4KEjwRBuO6cn00z+Kvf4NDogmvdCh3dZjaULeaq3sBIIotHGRVCrSGfK3x0zKw88jc1uDyF6dQyvkaPLXanlKIn6ufQmCGKd6OJ19UDMQqCIFx3njw9C8DXDk+61idtT8EwCvOOZPLwfI6+WABvo4e2kNcOH6VyKzxzdo537+kGjGRyMrfCSrnCVDJPb0vA1VfQHwtSrhh9squrjzYiYhQEQbjuzJoewXKx4lqfNnWJdvcaDWVOb+D09BI7OpsBzPCRsXd8MsVKWfPOnZ1ANW+wmCsyvpijP1bbhAYQ9DbccOJ19UCMgiAI150TU0bM/+zskmt9OrVMe9hHLNiEt9FjP/hTyytcmMuwb8CQpHBWGDllLAC7QW0+U2R8IVcjZW0ZidaQ96bqTH69iFEQBOGaoXWNnBm5YoljE4aUxEgi5/rOpNlQppRyhYis7+/bZBiFtpCXdKFEoVRmdD6Lr9Fj9xVYRmFsIcdiboWBmNsoDJllp3v7o1fzVm9YxCgIgnBN+PujU7zls9+xp5hZHBxZZKWsuX97O5lCidTyir03ubhMb7T6Jr9oGoUjY4ZRsMTrWs1+g8XsCiPzOTa3BfGYM5MtNdOjpobRQKs7fPTuPT387Ufu5b8/WtNruyERoyAIwjXh418+wnQqz5dfGnetW/pF77u9F6h2MOdXyozMZ9nRXc0bWJ7CkfEkWztCdg7ADhFlC4zOu2WurbkHh01DstpTaPAo3ry1jaYGeRyCGAVBEK4BxVKFklnh8+w59xyUi3MZeqJ+dnYbyWTLKJydSVPRsLunNpl8bDLF7QNVievWUFXDaHQ+55K5bgk24Wv02BpKN9p4zGuNGAVBEOpO3FQp9Td5GJ135w0uxDNs6wzbCd8JU6LipJl8toyFZRSyhRLxdIGtjmlnllzF8ckUhVLF5SkopehtCVCqaELeBrtHQVgbMQqCIFw1yhXN3FK+Zn3WXLtrsJVMoWS/8WutuTiXYWtHmBazwmjOlKp+6vQsvVE/m823/raQl0yhZM9DcL7xWwNvLG/Aqjyy6DFlrwdag1Jh9CqIURAE4arxR9+5wN3/9Sn7bd9izmEUAEZNGYrpVJ5sscy2zjBKKTqbfcTTBfIrZZ47n+CRPT32Q9wKEVmSFM5+g7CvkUBTAz+4aMxeGKwxCoGaY4S1EaMgCMJV4yuHjCTylw64k8mzS8bbv2UULG2iC3MZoDrjuMM0ClPJZYrlimsKmiVzbVUROR/wSik6Iz4KpQodzT56awbiGP98+y2db/wmb3JEEE8QhKtCfqVsaxUdNfsILEbnc3gbPezoMh7+llxFjVEI+xiZz9ryFs6pZp0Rw1M4NLaIr9FDR7g6I9k6dnQ+x/7NsZoQ0c+/Yyu9UT//8u5NV+Veb2bEUxAE4aowlVy2NYSmVo3GfP5igv2bY7QEvXhUVdjuQjxDS7DJLintjBiegjX3wClQ12U2o12Yy9AfC9Q8+K2qpbdub6+5tq0dYX7lh26hwSP5hFdDjIIgCFcF6+1+T1+E6VTerjB6/mKCMzNp3rq9nQaPoiVYLS29MJdhW0fYfsB3hP0s5owGNI9yz0V2egb9sdqy0nfu7ADgR+/oq88NbhDEKAiCcFWw3tT3b24lVyyzlDcG0/zF94bpifr5v948CEAs2GR7ClblkYUVIjo6nqQ74nc1lFlqqLB2wvjT79vDkV97F0GvRMXfCGIUBEG4KkwuLtPgUdxh6hFNpwwjMbaQY09f1J6YZvUbLGaLzGeLdj4Bqt7A4fFF+tZ48BfLho
rqWg1o3kYPLUHv1b2pDYgYBUEQrgpHJ5L0xwJ2cnjGDCFNLC67pCViQS+L2RUuxN1JZqj2G+RXKmsOvHnbDiNEdP8aeQPh6iBGQRCE18TfHhjjJ/7sRVdX8kgiy3PnE3xo/4D9YE9kiizmVsgVy65wT2vIy0KuyMW5WqNghY/AXXlk8ZsfuI3D/+ld9tAd4eojwTdBEF4Tn/jqcQDOz2XY0WXoElmlpW/d1k67GQKKpwuOWcnVB3zMVDs9N5vB3+RxPfwt8TpgzfBRxC8SFfVGPAVBEK4Yp3fw9Jk5+7PVwdwfCxDyNRLyNpDIFOzkszMH0NXso1TRvDSywJb2sC1xDUZewGKTCNddF8QoCIJwxcw4dI1GHHMRJhaXCTQ12F3H7WZnsmUsnG/9Vpnp8cmUK3RkEfEbAYw3b2m7+jcgvCp1MwpKKb9S6oBS6qhS6qRS6tPm+q8rpSaVUkfMnx92HPMJpdQFpdRZpdTD9bo2QRBeH5YYHVSlK8AwCs6Gso6wYRTGF3NEA02usI/VhAasaRSe/JW3c+TX3kWjzDe4LtQzp1AAHtBaZ5RSTcD3lFLfNPf+QGv9u84vK6V2A48CtwK9wD8ppXZorct1vEZBENYgkSlQKFVqkr0TC0Y4aFtn2FY+BZhI5lzeQEezj3OzaXxNnppJZ69mFJz7wrXnNZlipVRMKbX3Sr6rDTLmr03mT+2A1irvB76ktS5orYeBC8Ddr+X6BEG4Otz/W09z32e/U7M+vmh0Gu8baFnTU7DojvqZSeUZX8jVGBarOgngFnOqmrB+eFWjoJR6RikVUUq1AkeBv1RK/f6VnFwp1aCUOgLMAU9qrV80t35RKXVMKfUXSqmYudYHOKUVJ8y11ef8qFLqoFLqYDweX70tCMIbJJkrsrxiOOgzKfdshLGFHD1RoxdhPltgpVwhnV8hmVtxSU/0RgNki2UuxrM13kBTg4d33NLBz719q6ubWVgfXImnENVaLwEfAP5Sa/0m4KErObnWuqy13gf0A3crpfYAnwO2AvuAaeD3zK+vpVRV41lorT+vtd6vtd7f0dFxJZchCMJr4OWxRfvzi8Pzrr2xhRybWoN0RfxobZSdWppHTk+hp6UaArLKVp184Wfu5lffvfNqX7pwFbgSo9ColOoBPgQ88Xr+iNY6CTwDPKK1njWNRQX4n1RDRBPAgOOwfmDq9fw9QRBePzOpalhoJFEdlqO15sLs6tGZy3aeweUpOEJGaxkFYf1yJUbhM8C3gAta65eUUluA8692kFKqQynVYn4OYHgXZ0wDY/GjwAnz89eBR5VSPqXUELAdOHDltyIIwmvhR/74+/zylw7XrM+ljZBRS7DJ1i8Co9ooXSixoyts9x2MLeTs0tQBh6fQG61+3tLhnoImrG9etfpIa/0V4CuO3y8BH7yCc/cAX1RKNWAYn8e11k8opf4/pdQ+jNDQCPD/mOc9qZR6HDgFlICPSeWRINSH8YUcR8aTHBlP8rs/drur/HN2qUBbyEtvS8AemgNwemYJgG2dzfS2+FHKOM90apn2sJc2h7R1V8THz943xHv29uBrbLh2Nya8YV7RKCil/pDLVAtprX/pcifWWh8D7lhj/Scvc8xjwGOXO68gCG+c7zi6kY9PprhjU8z+PZ7O0xnx0xP1MzpfDR9968QMIW8D+wZa8DU20B3xM76Y42I8WxMiUkrxa/9sd/1vRLjqXM5TOHjNrkIQhGvKsYmU/Xk4kXUZhdmlAp3NPnqifn5wyUg0a6158tQsD+3uIuA13vz7YwEmFpY5P5vmQ/sHEG4OXtEoaK2/6PxdKRXSWmdf6fuCIKwvVsoVZlL5NUdXHp9Mcs9QKy8OL7j6DcAYpXlrb4SuqJ90vsRysUyuWGI+W2Rvf4v9va6In2fPxskVywy2iU7RzcKV9Cm8WSl1Cjht/n67UupP6n5lgiC8Ib7w/RHu/+2n+fx3L7nWV8oVLsxluGuwlbCv0U4sg1FiOp8tsr2rmfaQJYFdsFVQnT0HPVE/6YIxXW2t2QfCjcmVVB/9N+BhYB5Aa30UeFs9L0oQhDfO6ILh2P/vI+7K7ni6QEUbD/LOiI85h6dwdiYNwM7uZtqbDXG7RKZgD8TZ6qgkcspRrCVzLdyYXJHMhdZ6fNWSVAUJwjpnMbsCwOnpJRayRXvd0izqivjobPa5PIXjk0auYWd3sz0XIZEpMhzP4mv0uEpNLbVTgP4WCR/dLFyJURhXSr0F0Eopr1Lq32KGkgRBWL84DcG52bT92cohdEX8dEX8rpzCs+fm2NndTFvYZ5eYWnMR+mMB1+yDboenEAnIvK6bhSsxCj8HfAxDh2gCQ57iY/W8KEEQ3jgL2SLbzRzAVNLZhGZ5Cn7aQj7mM4ZRSOdXODiyyDtu6QSgzZyNMJ8pMJHMuQblANzWH+Un7t3E73/o9ppEtnDjciXmXWmtP1z3KxEE4XVxMZ5hU2uQplXzBxZyRd66rZ3zcxmXUZhO5Wn0KNpCXtrCXrLFMvmVMs9fnKdU0bzjFkNTzN/UQLO/0Ryrucy+gRbX+X2NDfzGj9xW/xsUrilX4ik8r5T6tlLqX1myFYIgrA8WskUe/L1n+eUvHXGta61ZzBbpifppD3tt0TqA87NptnSE8JiGwTrP984nCPsaedPmas/CptYgxydTpJbdKqjCzcurGgWt9XbgUxjDb15WSj2hlPqJul+ZIAivysGRBQC+cXya/Eq1/mMpX6JU0bSachXWrGSAU9NL7O6JANjjMxeyRS7GM2zvCrs8jlu6mnl5LAm4VVCFm5crrT46oLX+FQxF0wXgi69yiCAI14BDo1WZa6dO0UWzhLQ/Zshcx9NG3mAhW2Q6lWd3r2EU2sLVstOJxWUGVnkDziE4q/eEm5MraV6LKKV+yhyl+TzGDASZiCYI64CL8arIgFPR9GXTWNy5qYX2sJdExqhEOjpuvPXf1mdEglvNBrV4usBUcrnGG9hlehQgnsJG4UoSzUeB/w18Rmv9gzpfjyAIr4G5dJ7NbUFG53NMJ6uewstji/S1BOg0K4wWsgUqFc2h0UUaPMpOGluewqnpJUoVXZM3uGNTNY1ohZqEm5srCR9t0Vp/HDhW74sRBKGWUrnCF74/TDq/UrM3u5TndlOPyPIUtNYcGF7g7qFWANrDXioakssrHBlPsrsnYovaNfsaCXobODhieBYDrW5voNnfZH+WstONwZUYhXtF+0gQrh/fOD7Nr//9Kf7HU+7ZVuWKJp4usLktSCzYZOcULsazJDJF7jGNgrMJbWQ+65KqUErZFUbAmhVG//BL9/PVX3hLXe5NWH+I9pEgrHO+fXIWgDMzadf6fMbQMDJmH1QH4lhzle/Z0gZUQ0QzqTxTyWU2rWpCs35XCnods5UtdvdGuNMhrS3c3Ij2kSCsY7TWvGDONDg8lkTr6tyrGaszudlHb4vfblB78dICnc0+W866w/QUjk+mqGjoX2UUNju+J1PSBNE+EoR1wHymwK9//SQZU4raYmwhx3y2yNaOEJlCiWSumlc4MWmNxwzTHfXbRuLw+CJ3DbbaO
QArfHR4zMgbrPYUtptT07Kr/rawMXm92ke/UM+LEoSNxn/6Pyf4wvMj/MPxadf6UXNC2nv39gK4mtBeHJ6no9nHUHuInmiAZG6FXLHEbKrg0ilqCTTR4FEcNpvQVhuFH9nXx3tu6+FfP7S9Lvcm3FhcSUdzQmv9Ya11l9a6U2v9E8B/vAbXJggbhgPDRmfyoZFF1/qleAal4O2mHtHEYnVm8vGJFHduakEpZecCzsykKZYrdDT77O95PIrWkJf5bJGmBuWagwDgbfTwxx++k4++bWtd7k24sbiinMIafOiqXoUgbGAWs0W7uezwuNsojM7n6I0G2NphqJ1ankKxVGF0IWdPQrMe9CfMKiKnUYCq4ml/LEiDR0pLhVfm9RoF+a9KEK4S1qyDwbYgMw6pCoDhRJbB9iDRQBNBb4OdNxhbyFGuaLa0G0ahzexMtianWcllC2tgzmr5a0FYzSsaBV494sMAACAASURBVKVU6yv8tCFGQRBeM+MLOX77H8+wUq641i2jcN+2dpbyJVvYrlLRXIxnGGo3+gpiQS+LOcOjuGRqG20xew5ioSbXuVZ7CkGzWW1Tq0hVCJfncjIXhwDN2gaguMaaIAiX4Xe+dZavH51iqD3Ej+0fsNcvxrOEvA3s7Y/y1y8aOkQDrUFG5rOk8yX22jpFXhbNaWqWR2CFj2JBIzxk9TKsNgpWVdOP7Our4x0KNwOv6ClorYe01lvMf67+2fJqJ1ZK+ZVSB5RSR5VSJ5VSn161/2+VUlop1e5Y+4RS6oJS6qxS6uE3dmuCsL64MGe83T9xzF1hdDGeYWtnmE4zL2DNTD46YVQL3W7qFMVCXhbMktQTUymG2kO2DEVTg4dmfyPpfImWYBMRv/t97zPvv5X/8v5b2T/YWqe7E24W6jlYtQA8oLXOKKWagO8ppb6ptX5BKTUAvAsYs76slNoNPIoxt6EX+Cel1A6ttTTKCTc8iUyBU9NGX4FTzRTg4lyGe7a00Wm+3c+ZM5NPTC7ha/TY3kBrsImRRNbec4rVgeFJpPMltnaEa3SKtnU2s62zGUF4NV5vovlV0QYZ89cm88dqx/wD4N87fgd4P/AlrXVBaz0MXEAkuoWbhO9fSACwb6CFWfOhD0bD2FQqz9aOEJ3Nlqdg7F+Yy7C1I2xXC8XM8FG2UGIyueyStYZqCGmbWakkCK+HuhkFAKVUg1LqCDAHPKm1flEp9T5g0tRQctIHOOU0Jsw1QbjhOTiySLOvkQd3dpJaXrGTycPmm//WjjBtIS8NHmWHjy7MGZPQLFqDXtKFkh2GGmwLuf5GsWQksHd0i0cgvH7qahS01mWt9T6gH7hbKbUX+CTwa2t8fa2Etq75klIfVUodVEodjMfjV/eCBaFOzC7l6W0J0NMSsH+H6oS0LR1hPB5Fe9jL3FKBXNHwBpxv/TGz1+CIOSjH0iyy+Km3bOYDd/Tx6F0DCMLr5XIlqbcppV5QSo0rpT6vlIo59g68lj+itU4Cz2CEiIaAo0qpEQxj8bJSqhvDM3D+19wPTK1xrs9rrfdrrfd3dHS8lssQhLrz5ZfGeNfvP8uZmSXXeiJToL3ZS7eZTLb6ES7OZfAohyhds4+5dIHReaNzecghc21VFL1kzmXetMoo/Phdm/j9H99HyFfPVKFws3M5T+FzwK8DtwHnMBLFVh980ysdZKGU6lBKtZifA8BDwGFTKmNQaz2IYQju1FrPAF8HHlVK+ZRSQ8B24DUZH0G43nzumYucn8vwhe+PuNbjmQLtYR/dUePBPrNUnX0w0BrE32T0EXQ2+5lLFxhbMIyCU6eoJ2oYlBcuLdAa8hLxv+r/DQXhNXO5V4qw1vofzc+/q5Q6BPyjUuonWSOsswY9wBeVUg0YxudxrfUTr/RlrfVJpdTjwCmgBHxMKo+EG4nxhRwj5hu+FR6ySKSLtId91bJTM9l8bjbN9s5qiKiz2cexiSTjaxoFI/SUyBTscZqCcLW5nFFQSqmo1joFoLV+Win1QeB/Aa9a7Ky1Pgbc8SrfGVz1+2PAY692bkFYj1ix/u6Iv6bCaHmlTHvYZ4+/nFnKs1KuMJzI8tDuLvu7nc0+5rNFhhNZmv2NRANVb6At5KWpQbFS1vasBEG42lwufPRbwC7ngvmgfxD4aj0vShDWM8VShUSmULN+fDKFt9HDW7e32xVEAGdN6YnOZh9KGSqlM0t5RuezlCra5Sl0RPxobQzUGYgFXf0GltopwKZVlUeCcLW4XEfz32itXwBQSoWVUiFzfUxr/ZFrdYGCsN74xb95mf2/8U989eUJ1/rp6SVu6WqmryXAfLZoaxz96TMXiQWbeHBXJwBdER9zS3nOzxqVRzu6qiWkVgPbqemlmrkHUA0niacg1IvLlqQqpX5eKTUGjGJMYBtVSsmAHWHDcnp6iW+fMmYmHzMH4FiML+TY1Baky3zbt7yJU9NLvG1HBy1mc1m36SmcmzVmJWztcOcULFZXFwH8+U/fxW/8yB5++Laeq35vggCXL0n9FPDPgHdordu01q3AO4F3m3uCsOE4bhoCb6OHyWRVrqJS0Uwl8/THAnRFjAf77FKBQqnMVHLZ1WjWZeYczs2l6Y8FCHirc5E7HQNwBmK1iqYRfxM/ce9mu1pJEK42l0s0/yRwu9baDo5qrS8ppT4EHAV+o94XJwjrjbOzafxNHu4ZamPSMRoznilQLFfojwVtuYrZpTxhXyMVDYPt1bf+roifYqnCwZEF9vRGXed3zkGQ2QfC9eCy4SOnQXCsLQOVNb4uCDcNf/rsRf702Ys160YJaTObWoMuT8FqNutvqXoKRhOaIWOxeZWnAIYnsa3LrVPkbaz+X/L2fik7Fa49l/MUJpRSD2qtn3IuKqUeAKZf4RhBuOEpVzSf/eYZAD58zyZbnhrgUjzLXYMx+mIBUssrZAslQr5G/vHEDN4GD/sGWogEmvAomFvKU6kYLT39jlCQ1cAGsGMN5dLf/9DtDLQGbVkLQbiWXM4o/BLwf5RS36M6cOcu4D4MuQpBuCk5PllNIH/3XIL37DWSuivlCtOpZTa19tkJ4Xi6QMjXyLdPzfD2WzrsB3lHs4/ZpTzFcgVvg4f2UNUQdDnyBtu7ahVNP3Bnf13uSxCuhMuVpJ4E9gDfBQaBLebnPeaeINyUnJyqGoURM/wDMJVcpqKhvzVo6xDNpQvkV8pMLC5zW181P2DJVUwn83RH/Xg8yrVn4SxHFYT1wCt6CkqpbUCX1vovVq3fr5Sa0lrXBlwF4SZgfGGZpgZFyNfoyhuMLxif+2MB2kJVT8EyHIPtzryBj8lknky+ZGsWWXgbPfztR+5lW2dYqoiEdcflEs3/DUivsb5s7gnCTcn4Yo6+lgADsSATjgqj75mDcra0h21PIZ7OMxzPmutVo9AZ8TO3lGc6laevpba09M1b22rmKAvCeuByOYVBU9bChdb6oFJqsG5XJAjXiEvxDNFAE21h98N5YiHHQGuQsK+Rc6ZERbmi+asfjPDevT10R/1UKppGjyKeKZAtGrqNTk/B0jAC6Fuj30AQ1iuX8xT8l9mT/8qF
G5r5TIEHfu9ZPvi552v2xheXGWgN0tsSYCppVGVPLObIFcu8bbsxw8MYiONjbqnAcCJLZ7OPsGOOgTOZvKVDdIqEG4fLGYWXlFI1GkdKqX+FUY0kCDcsXzlk6BaNzOc4NVUdiJMplFjIFhmIGcnk5ZUy2ULJMSHNPfQmnikwksgy1O5+8Fu9CgBD7TIzWbhxuFz46JeBrymlPkzVCOwHvMCP1vvCBOFqMDafozPiq0noOstOhxNZdvdGAOw5BgOtAQorRo9mPF3gUrw6S9mis9nHzFKemVSeH7q1Kn9t7FU9hSFRNBVuIC5XkjqrtX4L8GlgxPz5tNb6zeakNEFY16SWV3jb7zzNx798pGbv1NQSb9naBsB0yllhZBqFWJB2K5mcMUJELcEmV0NZR7OPC3MZ5rPFGk9ha0eYOze1cP/2dqJBmZAm3Di86jBXrfXTwNPX4FoE4ary3Pk4AN88MUOxVLElJDKFEsOJLB+4o48j40k7bwC4xmA2mXOUE+kCU8nlmiqijmYfhZLhTQyu8gYC3ga++gv31efGBKGOXFb7SBBuBC7FM5QrtRNif3Bx3v48vpizP5+eNnIIt/ZF6I76mVmqegrPX5ynPxagJdhULTvNFJhO5e1xmBbOklJJJgs3C2IUhBuagyMLPPB7z/LF50dq9oYT1W7kmVTVGzhp5hN290TpjVYrjLKFEs+dj/PuPd0oZUw5MzSMCkwml+ltcRfkOWcfiKKpcLMgRkFY92itOTaRROtab+CLPxgF4KWRhZq90fkcd24ylEannUZhaom2kJeuiI/OZh/xtDEM51I8y0pZ86bNxgjyBo+iJxrgzEyadL5E76rw0a4eIznd1xLA1yidycLNgRgFYd3z3fMJ3vdH3+evTAPg5PhEEsBuMrPIr5SZSi1z95CRTJ5xJJNPTS+xuzdiewMLZpNZVa6i+ta/qTXIi5eMMNRquYrNbSGe+/fv5G8+cs8bvUVBWDeIURDWPdaD/+8OuWci51fKjC7k8Ci4GM+SK5bsvfGFHFrDLd1hYsEm21Moliqcm01zqzncpi1s9CLkiiV79oFzNvLmtiDpgnHeLWv0Gwy0Bl2zEgThRkeMgrDusXoKhhNZVwjpwlwGrbHnFVu9BABHxg1DcmtvlO5ogNklwyicn0uzUtbcavYltJklpvOZIqPzOTqbfQS91aI85wN/SJLJwgZAjIKw7jllVgtlCiUWcyv2uhUysoyC1XUMcGh0kYi/kW0dYbojPttTOGl2L1tGodU0CgtZwyisLi3d1VOVtnbKWAjCzYoYBWFdUygZswr29BkP8VHHfINzsxmaGhTvuKWDBo/iwlzVKByfTHH7QAsej6I7GrCrj05NLRH0NtgP/7aw6SlkDQnszW3uKqL7Ta2jwTapLhI2BnUzCkopv1LqgFLqqFLqpFLq0+b6f1FKHVNKHVFKfVsp1es45hNKqQtKqbNKqYfrdW3CjcPYvJEbePsO4+FsNZcBnJ9Ns6U9TNDbSHfEb88+KFc0F+Yy7Ow23vJ7on7ms0UKpTKnppbY1ROxh95YcxEmFpeZSxdcSqdgVCAd+tRDfE0a0YQNQj09hQLwgNb6dmAf8IhS6l7gd7TWe7XW+4AngF8DUErtBh4FbgUeAf5EKSV1fhucS2avgaVOOu4wChfiGXvwfbujtHRsIUehVGG7OdWs26wamlsqcCmRYXtnNWFsNaAdGDZKWld7CmAko2VesrBRqJtR0AaWP99k/mit9ZLjayGM2c9gzH3+kta6oLUeBi4Ad9fr+oT1RzJXrFmzQkK7eiN0RXyMzhtGYaVcYWJx2Rabc/YbnDFzENaoy25TxvpiPEMiU3Q1mgW8DbSHffYAnc2tkkwWNjZ1zSkopRqUUkeAOeBJrfWL5vpjSqlx4MOYngLQB4w7Dp8w14QNwJ89d4l9n3nS5QmAkWTujwWI+JvY1Bq0w0eTi8uUK5pN5pt9Z7OPOdMoPH12jmZ/o51MtvoLDo4sAsY4TScDrQGSZgJ7k+QOhA1OXY2C1rpshon6gbuVUnvM9U9qrQeAvwZ+0fy6WusUqxeUUh9VSh1USh2Mx+P1unThGvMb3zgNwLGJlGv99PQSu83O4YHWoG00Rs1/bjbf+juafSxkixRLFb5zJs47b+mkqcH4z9sKH1ldz6slKQZixu+tIS/RgCiaChuba1J9pLVOAs9g5Aqc/A3wQfPzBDDg2OsHptY41+e11vu11vs7OjrqcLXCtcYpZnd2phpdzBUNNVNLTmJza4jppTyFUtmuQtpsh4+MB/+lRIZEpsBtfVH7PM3+JkLeBg5YRiHmNgpWs1qzX0pOBaGe1UcdSqkW83MAeAg4o5Ta7vja+4Az5uevA48qpXxKqSFgO3CgXtcnXHsqFc1SfqVm3ZKZADg9U5WrODuTRuuqxlBP1I/WRsL4wPACnc0+e8KZJU53aNQIEa32BrrNYyP+RtrD7qTx+/YZBXCrpbEFYSNSz1ejHuCLZgWRB3hca/2EUup/KaVuASrAKPBzAFrrk0qpx4FTQAn4mNa6XMfrE64x//O5S/zmN8/wg0884JKhthLEjR7FGYencHraMBBWbqDDNAAzS3m+dyHBgzu7UMqIOlpVRFbeYHUVUU80wMV4lm2dYfsYix1dzXz9F+9zTUsThI1K3YyC1voYcMca6x9c4+vW3mPAY/W6JuH68tSZOQD+8vsj/Mcf3mWvxzOGUXjr9naeORsnnV+h2d/E6eklwr5GOzFseQPfOTNHMrfC23a02+foNA3GwdG18wbWOZwzEJzs7W95w/cnCDcD0tEsXDVWyhX+8KnzrjkGq/cBTky6k8mWp2B1D1vyFefn0q43e+tN3hLGu29b1Si0h42H/fjCMp3NvhpJip97+1baQl4e2dP9+m9QEDYAYhSEq8bBkUV+78lzvPN3n6FQqo38WcbC6jWwuGRqFr15iyFzfdEUtrswl3U1mrWFvDR4FPF0gZ3dzbYhAGhq8Ng6Rrd0V/WKLAbbQxz81EP86B39b+QWBeGmR4yCcNU4P1dNEv+fw+7CsYnFHMncCs3+RqZTyxTN2cYL2SJ/9r1hNrcFGTIlJuLpAslckUSmwPauqlHweJT94L9zc6zm71uKpzvXMApATS5BEIRaxCgIr5nf/dZZPvm14zXr52czNPsbaQ15eXls0bX3zeMzAPzMfUNUtGEkAA6PLVIsVfjM+/cQ8DbQ7G9kbilvdzJv63TPMLAqhJwlp/aemTfYs8aeIAhXhhgF4TWhteaPnr7AX784ZucILM7NGjmAzW3BmhDRE8emuK0vaoeILNXSoxMpPAruGjTe/LsifubSBc6bRmF7p/ut/08+fCf/7PZefmh3V821/fcfv4Ov/sJbeO/e3po9QRCuDDEKwmvC+bB3Joy11pydTbOzO8JgW8ilZjq+kOPoRIr37O2xq3+siqOj40l2dDXbg206m33MLuU5P5vB3+Sp6R3obQnwh//iDtrCtVVE0WATd26K0eCRMJEgvF7EKAivicPj1bCQ1RMAMLtUIJlbYVdPM5tag0yllu1k8zeOTwP
wntscRiFdQGvNsYkktzvKQS0NowvxDFs7wrbEtSAI1wYxCsJr4vR0Gm+Dh/5YwJaNADhtNp3t7I6wqTWI1jCVNEJE3zo5w+39UQZag0T8jXgbPMTTBcYXllnMrbB3oJoD6Ir6mV3Kc24m7ao8EgTh2iBGQXhNnJ5eYntXmDdvabMlJQDOmN3Ht3Q309Ni9BNMJw0l09PTS+wfbAWMCqAOU+b65JQRftrbV/UU+mNBVsqamaW8PQ9BEIRrhxgFYU3+x1Pn+bPnLtWsn50x8gZDHSEWskWWi0aI6MzMEn0tAaKBJnpNCYupVJ7xhRz5lQq3OB7w7c0+4pmCPUBnS0d1hsGAQ9Z6a4d4CoJwrRFZSKGGhWyR33/yHABv2hzjjk1GZVB+pcxcusCm1qDdE7CYKxLwBjgznbb7Ayyp6unkMmdN5dEdjt6BjrCPicUcw4ksXREfIUf3sVOeYq0mNEEQ6ot4ChuUSkVzx2e+zR8/faFm76nTs/bnk1NVgbpps4y0LxYgFjSMwoI5+/hiPMPOHuMh7m9qoDXkZSqV56yperp6BGYiU2A4kbUb1iyc1UaDMvBGEK45YhQ2KGdn0yzmVvidb52t2Ts5tUTQ24C30WPPLQBj2hkYD26rs3ghW+TiXJZSRbOzO2J/tyfqZya1zNmZNJvbgi5voKPZx3y2yPnZdI1R8Dc18H+/dYi//Om7pANZEK4DEj7aoDx/cd7+rLV2PYBPTy+xs7uZTKHEiKMvYdzsQu6PBSiajWuLphwFwK6earinJxpgYjFHsezOJ4BhFLSGpXypxigAfOq9u6/CHQqC8HoQT+EmJlMo8WN/+jxPm5LVTqzh9gCTyWX7s9aaMzNpdvZE2NQasj0FrTVffmmc3qif3pYArY7w0dlZo0x1sK36gO9t8XMpkWUkkWVnT9WDACOnYDHULslkQVhPiFG4ifnSgTFeGlnkZ77wkmvkJbg7kycWq0ZhZilPanmFXd3N9EQNyQlr/ch4kp+5b4gGjyIaaMKjYDFbZGw+x0BrgMaG6n9OPdEAxVKFioZd3as9herks6F2yRsIwnpCjMJNzHfPJ+zPw4mMa29kPsudm4z+AEuHCKr9Bjt7IrSFvSRzK6yUK/b6PvMYS7E0nikwvpiz5xxb9LZUp5jVegrVvdXDcARBuL6IUbiJGZvPssOUnj42UdUpyhVLzKUL3GOK002lqp6C1Zl8S3ezrS+0mC261i0G20JcimdNT2G1UahWEa02GH2xAB+5f4gv/uzd+Bob3vB9CoJw9ZBE801KqVxhYnGZf3X/EBOLyxyfTPGBO40BM9bs4719USL+Rpen8Ny5BFs6QkT8TbSbFUaJTJHzsxl6o34i/ib7u1s7wnzt8CTFcqXmwX97fws//46tbGoN1gjUNXgUn3yPJJMFYT0iRuEmZSqZp1TRbG0P09sSYHbJePCvlCv8t386h1Jw75Y2eqIBu/9gdinPC8Pz/NID2wFsT2EhW2QyuUx/zP3g39oZsquQVlcReRs9/IdHdtb1HgVBuPpI+GidsFwsc3jVYBowmsxWypWaRLFz/2I8g9bu/YtmDmFzW5BooInU8goAT5+Z47nzCfb2RYmFvPS0+Jk2w0fPnJ1Da3j3bcYc47aw4SnMZwvMLuXpivpdf8OpTSTdx4JwcyBGYZ3wmSdO8qN/8jxjjqqgYqnCfb/1HbZ/8pvc8Zlvc2h0oea4X/3qMR78vWf5wvMjrvUTZg5hV2/EZRQsvaEv/uzdgNVkZngKz56L0x3x230FlpRFPF1gOpWnZ5VRuNsUuQNq5h4IgnBjIkZhnWDNJvju+bi9tpAtMp3K8/CtXTT7m3jsG6drjvvOGeP7//2p81Qc3sTxyRRb2o3cgNMojM5naQ15aTH7DLojARIZQ6rixOQS+wdjdiNbNNCEt8HD+dkMxVKFrojbKIR8jQy0BuhrCUj3sSDcJIhRWCek8yUAXrhU7TROLhcBeN/tfbzjlg6GE1nXMdZw+109EZK5Fc7Npe29MzNpdvUapaDRQBPJnGUUcmx2aApZMtcTi8tMLOZcuQGlFJ0RH0cnksZ3V3kKAE9+/O089W/e/vpvXBCEdYUYhWtItlCqebADLOVXmFmqJnstUuaDvCXYRG9LgMXcCrliyd635hj/y7sHAHhp2Agv5VfKjC/mbBG6SKCJdL5EuaIZnc+5Oo+tB/3BkQUqGja3uRPG3RE/Z0xRu4FYbU+Bv6kBf5OUlQrCzULdjIJSyq+UOqCUOqqUOqmU+rS5/jtKqTNKqWNKqa8ppVocx3xCKXVBKXVWKfVwva7tevGRvzrIO3/3GeZNrSCLcXOecaNHkcgU7fWkGfKJBproN+cMTDkkKV42h9w8tLuLQFOD3aU8nMiidXUeQUvAKCOdzxSYSi27PQXTKPzA1EJarUxqJZeVgm0yCU0Qbnrq6SkUgAe01rcD+4BHlFL3Ak8Ce7TWe4FzwCcAlFK7gUeBW4FHgD9RSt1wr6BfOjDGf/2H2tj/7FLeFqH78sFx154lM7FvoIV4umowUg6jYDWDOSUp/uH4NHv7o/REDdXSRdOzuGB6ENZDPGoaheOTKbTG5Sl0mwNxfmCGrQbbaz0FMBrQAt4b7n8OQRBeI3UzCtrA0lZoMn+01vrbWmsrBvIC0G9+fj/wJa11QWs9DFwA7q7X9dWLX/3qcT7/3Uv2RDILZ7np0fGka89pFDKFEvkV49jV4SOozj3Or5Q5Ppni7Ts67O8s5gwv48JcBqWqvQOWUbD+7iaHNxD2NdLsb2R2qUDY12hXHFlYVUXbZAqaIGwI6tq8Zr7pHwK2AX+stX5x1Vd+Fviy+bkPw0hYTJhr645PfPU45UqF3/7nt7vWndU/h8cWecu2dvv3YxMpGj2K+7a122/yFhOLOULeBrabkhTxdIGB1iCp5RUaPIqwr9GWg7A8iYvxDBVd7Q9oDXlZyBbtvYFY0I71d0aMJrQXzZzD5lXdxz1RP+l8hs1twZoqon++v59ooIn7HPciCMLNS10TzVrrstZ6H4Y3cLdSao+1p5T6JFAC/tpaWusUqxeUUh9VSh1USh2Mx+NrHFJfKhXN3x4Y4/GDE/ZD2GJsodpjcNShNQRwYmqJHV3N3NobYXQ+x4rZCQzGm/1ge4iOZuPhbc0nSC4XiQaaUErhbfQQCzYxlzY8hfOzhmHZYfYUxIJekg5PYatj7rH1tn9odJGAORXNSY8ZQhpsq51tEPE38cE39dsjNgVBuLm5JtVHWusk8AxGrgCl1E8B7wU+rKutuBPAgOOwfmBqjXN9Xmu9X2u9v6Ojo67XvRaXHGqjz513G6UzM9UZBc6EMMDFuQw7usJs7QhTqmiXATkzk2ZXT4R2U1bC8gYWskU7SQzQ2VyVsj49s0RTg7If5LFgkz0acziRdSWFW0NefI0eShVNV8RX4w2EzTnKu3vdaqaCIGw86ll91GFVFimlAsBDwBml1CPAfwDep7XOOQ75OvCoUsqnlBoCtgMH6nV9r5cTk9UHvzPpC3BqOo1HwZb2kGtwzXKxzGRymS0dYbsvwOoinknlia
cL7Oxuto2CVYF0YS7DFscbf2fEZxuMA8ML7O1vwdto/E8YC3lZypf41slZCqWKK9yjlLK9hdUNaAAfuX8LH39oBx+5f8vr/LciCMLNQj1zCj3AF828ggd4XGv9hFLqAuADnjTfWF/QWv+c1vqkUupx4BRGWOljWuvyK538ejFhjqQMeRvszxZnppcYbA+xpT3s2rN6E7Z0hOxQjWUUfvtbZ2hqULzjlg5bayiRKVAsVbgUz/Lgri77PB1hH5fiWXLFEscnUnz0bdWHuBUS+trLE7SGvNy/3e1F9cUCXEpk1wwD7RtoYd9AS826IAgbj7oZBa31MeCONda3XeaYx4DH6nVNr4VvHJtmc1uQPX1R1/pUKk9ryMtALFDjKZyZSXNbf5T2kJcXh6udyScmjfzCLV3Ndomn1az2g4vz/PBtPWzrNHID0UATiUyBkfkspYp2zTfuMD2FU1NLlCqaOzfF7D2r6eyZc3HuGGipkave0xflufMJvA3SrygIwisjT4g1WC6W+djfvMx7//B7FEpuZ2U6uUxP1E9/LMikwyik8yuMLeTY1d1Mb0uAdL7EUt4oKf3BpXnaw162dYYJeBuI+BuZXcqTzq8wncrbyWKA9rCXeLrApXjVu7DobPZTLFfsKqJdjhzAHvOz1tCzhjjdQ6bHEZReA0EQLoMYhTV4aaSqRnpsVRXRVDJPb0uA7qjfftsHODdrjrHsjtg9BdNmT8GB4QXuGWqzE7zdaWJl3wAADHdJREFUpjLpRfPB70wKt4d9JDIFO/zkHF5jVSc9ey5Os7+RXkcoqC3ss7uT11IsfdPmGH/+U/v5Nw/f8pr+XQiCsLHYsEZBa81XDo7XVAkBrtCPc79S0Uwml+mN+mkNeckVy3ajmZWA3tUboc+UpJhM5lg0B9TcPlANQ3VF/Mwu5TlvGpLtDqPQ0WyEiCYWlwn7Gu3GM4BO0ygcGF5gd0+kpopolzkLeXXJqcWDu7pck9MEQRBWs2GNwt8dmuDf/d0x/ujpCzV7J6eWGGg13/YdoyqPT6bIFErcsSlGzJSettRHDwwv0Bv10xv122/qk8k8p6dNY+EYXt8dMbyMC/EM3gaPyxvobQkwlcoztpCjP+aWpLY8BYC7HLMMLP75m4zmcJltIAjC62XDGoVvnZwBjFnGTrTWHB5LctdgK80+9/zip8/OoRS8bUcHrSHjjXshW0RrzYvD89yzxQgRdYR9NDUoppLLnFrLKET9xNMFzkynGWoP0ehI/vbHAhRLFV4eW2RgVedxp8MovGkwxmp++LYevvmv7+e9e3te778WQRA2OBvWKFhSE04BOoDPfvMMqeUVbuuL0tPid4WPnj4bZ99AC60hr+0pLOaKXIxnSWSK3D1kvL17PIquiHHsqakluiI+uwcBDKNQ0UYCeluXW1PIUkNN5lbYsWov7KsWi9071Lbmfe1aI6wkCIJwpdRV+2i9kl8p2x3Fc6uMwstji7QEm3j0rk08czZuh48WskWOTST55Qd3ANW4/UK2yMi8kTC+Z6ga0uk28wbJ3Aq7e9ydwlZZarFUceUTAPpaqt7B7h53OaxSio/cP8SevqgolgqCUBc2pFEYTmSpaOPNe3bJbRQuxbO8e083AW8DPVE/J6eM6qMj44toDfduMR78sVDVUzg+kaI97HVNLeuK+jkylmR2Kc8DOztdf8PZVbx6RoHlKcDashOffM/u13PLgiAIV8SGNAqRQBO/9MA2xheX+drhSYYTWYbaQ6RyK8xni7aeUE+0Or/46HgKj8JuZqsOrilyydQacoZtupr9ttTF6m5hpyHY3tns2gv5Gvnzn9rPsYlUzcAbQRCEerMhcwp9LQF+5Ydu4cFdxhv8f/zqcaAqdmcNmrF0imZTBQ6PJ9nWGSZkxvUbGzy0h73MpQumUXG/8XdHqzmEe7a44//O8ZWD7bUP/gd3dfHxd+2Q3IAgCNecDekpWLx3by9PnZ7jH45Pk18p241qljdgNYONLeQ4OLLAB+/sdx3fHfVzZmaJhWyRLasmlrWYieiWYJOr18Di73/xrRybTNpzEgRBENYDG9JTcPLevT0UShWOjid5eWyRrojP7hS2xOv+4cQ0uWK5ZtBMTzTA4TFjmtnQKqOwt98wLL/1wb1r/t3b+qN8+J7NV/VeBEEQ3igb2lOA6pCa0fkcxyZS7BtoscM2/bEAHgXfPjkLGA9yJz0OmYmhDrdR2Nkd4dxvvNuWthYEQbgR2PBPrO6oH4+C0YUs4ws5tjpmEfubGhhsC5HIFGjwKLoczWPWsRYDsdrcgBgEQRBuNDb8U6upwUN3xM9Lw4uUKtolOQHYc5O7I35X5zG4K4fEAAiCcDMgTzKMATQHTGXUTavKQG/pNnoFfE21/6qs/oO3bF27u1gQBOFGQ4wC7r6B1Z7C+27vBWBpuVRzXINHcfLTD/MXP31XfS9QEAThGrHhE80A/+7hnbSHfQS8DTUKo9s6w3zqPbu4Y9Pa4ypDPvlXKAjCzYPSWl/va3jd7N+/Xx88ePB6X4YgCMINhVLqkNZ6/1p7Ej4SBEEQbMQoCIIgCDZiFARBEAQbMQqCIAiCjRgFQRAEwUaMgiAIgmAjRkEQBEGwEaMgCIIg2NzQzWtKqTj8/+2dfYwdVRnGfw9srdACAUtNBcJCQoEaKEKDmKA0GCAqoRJowBYlwcTEIMEvDEQIf+gfSoxRUg2aSCkokAgkYKI00PBhKN9Nt7aU8qmwuqEiEFcQYbevf5xzp8N6723u7sydi31+yWTPPWfOnWeePXvfPWdm3stf+nzYecCrfT5mJwZJCwyWnkHSAtbTjUHSAoOlpy4th0bEge0a3tdBoQkkPdHpScB+M0haYLD0DJIWsJ5uDJIWGCw9TWjx8pExxpgCBwVjjDEFDgq988umBZQYJC0wWHoGSQtYTzcGSQsMlp6+a/E1BWOMMQWeKRhjjClwUDDGGFOw2wcFSddL2i5pc6lusaSHJf1J0u8k7VtqOza3bcntH8z1J+TXz0m6VpIa1nO/pG2SNuZtfp1aJK0sHWujpB2SjmvKm13o6bc3syStyfVbJV1R6tOEN930zNibaej5gKTVuX5E0tJSnxn7U6GWKsbNIZLuy75vkXRprj9A0j2Sns0/9y/1uSKf/zZJZ1TpTVsiYrfegE8BxwObS3WPA6fk8kXA93J5CNgELM6vPwTsmcuPAZ8ABPwB+EzDeu4HlvTLmyn9jgFeKL3uuze70NNXb4AVwK25vDfwZ2C4wXHTTc+MvZmGnouB1bk8H3gS2KMqfyrUUsW4WQAcn8v7AM8Ai4BrgMtz/eXAD3N5ETACzAYOA56n4s+cqdtuP1OIiAeB16ZUHwk8mMv3AOfk8unApogYyX3/ERGTkhYA+0bEw5F+WzcCn29Kz3SOW4GWMl8AbgFo0Ju2eqqiRy0BzJE0BOwFvAP8s0Fv2uqZznEr0rMIWJf7bQfeAJZU5U8VWno9ZhctYxGxIZfHga3AQcAyYE3ebQ07z3MZKYD/JyJeBJ4DTqxy7Exltw8KHdgMnJXLy4FDcnkhEJLWStog6Tu5/
iBgtNR/NNc1pafF6jzNvaqyqWVnLWXOY+eHcFPedNLTop/e3Aa8CYwBLwE/iojXaM6bTnpa1OFNNz0jwDJJQ5IOA07IbXX606uWFpV5I2kY+BjwKPDhiBiDFDhIsxRI5/tyqVvLg9q8cVBoz0XAxZKeJE3x3sn1Q8DJwMr882xJnyZN36ZS5b2+veoBWBkRxwCfzNsXa9YCgKSPA29FRGv9tilvOumB/ntzIjAJfIS0BPAtSYfTnDed9EB93nTTcz3pQ+0J4CfAemCCev3pVQtU6I2kucDtwNcjotssrZMHtXkzVMWb/L8REU+TlmaQtBD4XG4aBR6IiFdz2+9Ja5W/Bg4uvcXBwN8a1LMuIv6a+45Lupn0QXBjjVpanM97/ysfpRlvOumhAW9WAHdHxLvAdkkPkZYk/kgz3nTS80Jd3nTTExETwDda+0laDzwLvE5N/kxDS2XjRtIsUkD4TUTckatfkbQgIsby0tD2XD/Ke2cqLQ9q+7vyTKENrbsKJO0BXAlcl5vWAsdK2juvx54CPJWne+OSTspTyi8BdzalJ0995+U+s4AzSdPlOrW06pYDt7bqGvSmrZ6GvHkJOFWJOcBJwNMNetNWT53edNOTx++cXD4NmIiIWv+uetVSlTf5PH4FbI2IH5ea7gIuzOUL2XmedwHnS5qdl7OOAB6rdexUcbX6/byR/oscA94lRd8vA5eS7gp4BvgB+cnvvP8FwBbSgLimVL8k1z0PrCr36bceYA7prolNue2n5DsWatayFHikzfs05c3/6GnCG2Au8Nt8vKeAy5r0ppOeqryZhp5hYBvpouu9pLTOlflThZYKx83JpGWeTcDGvH2WdOfgOtKsZB1wQKnPd/P5b6N0h1FVY2fq5jQXxhhjCrx8ZIwxpsBBwRhjTIGDgjHGmAIHBWOMMQUOCsYYYwocFIzpAUmTOc3BFqUsmt/M97p36zMsaUW/NBozExwUjOmNf0fEcRHxUeA00j3mV++izzDpKWJjBh4/p2BMD0j6V0TMLb0+nJSGeR5wKHAT6UEngK9FxHpJjwBHAy+SMmBeS3pgaikpJfLPIuIXfTsJY7rgoGBMD0wNCrnudeAoYBzYERFvSzoCuCUilih9Ucu3I+LMvP9XgPkR8X1Js4GHgOWRUiMb0yhOiGfMzGllrJwFrFL6hrdJUmrzdpxOyll1bn69HymnjYOCaRwHBWNmQF4+miRltbwaeAVYTLpe93anbsAlEbG2LyKN6QFfaDZmmkg6kJRdc1Wkddj9gLGI2EHKtb9n3nWclLO/xVrgqznbJpIWtjJzGtM0nikY0xt7SdpIWiqaIF1YbqVA/jlwu6TlwH2kbzeDlBFzQtIIcAMpw+YwsCGnPf47FX2VojEzxReajTHGFHj5yBhjTIGDgjHGmAIHBWOMMQUOCsYYYwocFIwxxhQ4KBhjjClwUDDGGFPwX8f58wJ97NOyAAAAAElFTkSuQmCC",
+ "text/plain": [
+ "
"
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "train_df\n",
+ "\n",
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "plt.plot(train_df['index'], train_df['co2'])\n",
+ "plt.xlabel('Date')\n",
+ "plt.ylabel('CO2 Levels')\n",
+ "plt.show()"
+ ]
+ },
{
"cell_type": "markdown",
"metadata": {},
@@ -187,7 +115,7 @@
},
{
"cell_type": "code",
- "execution_count": 4,
+ "execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
@@ -198,7 +126,7 @@
},
{
"cell_type": "code",
- "execution_count": 5,
+ "execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
@@ -214,1138 +142,938 @@
},
{
"cell_type": "code",
- "execution_count": 6,
+ "execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
- "[flaml.automl: 02-28 21:28:18] {2060} INFO - task = ts_forecast\n",
- "[flaml.automl: 02-28 21:28:18] {2062} INFO - Data split method: time\n",
- "[flaml.automl: 02-28 21:28:18] {2066} INFO - Evaluation method: holdout\n",
- "[flaml.automl: 02-28 21:28:18] {2147} INFO - Minimizing error metric: mape\n",
- "[flaml.automl: 02-28 21:28:18] {2205} INFO - List of ML learners in AutoML Run: ['lgbm', 'rf', 'xgboost', 'extra_tree', 'xgb_limitdepth', 'prophet', 'arima', 'sarimax']\n",
- "[flaml.automl: 02-28 21:28:18] {2458} INFO - iteration 0, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:19] {2573} INFO - Estimated sufficient time budget=2854s. Estimated necessary time budget=3s.\n",
- "[flaml.automl: 02-28 21:28:19] {2620} INFO - at 1.0s,\testimator lgbm's best error=0.0621,\tbest estimator lgbm's best error=0.0621\n",
- "[flaml.automl: 02-28 21:28:19] {2458} INFO - iteration 1, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:19] {2620} INFO - at 1.0s,\testimator lgbm's best error=0.0621,\tbest estimator lgbm's best error=0.0621\n",
- "[flaml.automl: 02-28 21:28:19] {2458} INFO - iteration 2, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:19] {2620} INFO - at 1.0s,\testimator lgbm's best error=0.0277,\tbest estimator lgbm's best error=0.0277\n",
- "[flaml.automl: 02-28 21:28:19] {2458} INFO - iteration 3, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:19] {2620} INFO - at 1.0s,\testimator lgbm's best error=0.0277,\tbest estimator lgbm's best error=0.0277\n",
- "[flaml.automl: 02-28 21:28:19] {2458} INFO - iteration 4, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:19] {2620} INFO - at 1.1s,\testimator lgbm's best error=0.0175,\tbest estimator lgbm's best error=0.0175\n",
- "[flaml.automl: 02-28 21:28:19] {2458} INFO - iteration 5, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:19] {2620} INFO - at 1.1s,\testimator lgbm's best error=0.0055,\tbest estimator lgbm's best error=0.0055\n",
- "[flaml.automl: 02-28 21:28:19] {2458} INFO - iteration 6, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:19] {2620} INFO - at 1.1s,\testimator lgbm's best error=0.0055,\tbest estimator lgbm's best error=0.0055\n",
- "[flaml.automl: 02-28 21:28:19] {2458} INFO - iteration 7, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:19] {2620} INFO - at 1.1s,\testimator lgbm's best error=0.0055,\tbest estimator lgbm's best error=0.0055\n",
- "[flaml.automl: 02-28 21:28:19] {2458} INFO - iteration 8, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:19] {2620} INFO - at 1.2s,\testimator lgbm's best error=0.0031,\tbest estimator lgbm's best error=0.0031\n",
- "[flaml.automl: 02-28 21:28:19] {2458} INFO - iteration 9, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:19] {2620} INFO - at 1.2s,\testimator lgbm's best error=0.0031,\tbest estimator lgbm's best error=0.0031\n",
- "[flaml.automl: 02-28 21:28:19] {2458} INFO - iteration 10, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:19] {2620} INFO - at 1.2s,\testimator lgbm's best error=0.0027,\tbest estimator lgbm's best error=0.0027\n",
- "[flaml.automl: 02-28 21:28:19] {2458} INFO - iteration 11, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:19] {2620} INFO - at 1.2s,\testimator lgbm's best error=0.0022,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:19] {2458} INFO - iteration 12, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:19] {2620} INFO - at 1.3s,\testimator lgbm's best error=0.0022,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:19] {2458} INFO - iteration 13, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:19] {2620} INFO - at 1.3s,\testimator lgbm's best error=0.0022,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:19] {2458} INFO - iteration 14, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:19] {2620} INFO - at 1.3s,\testimator lgbm's best error=0.0022,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:19] {2458} INFO - iteration 15, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:19] {2620} INFO - at 1.3s,\testimator lgbm's best error=0.0022,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:19] {2458} INFO - iteration 16, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:19] {2620} INFO - at 1.3s,\testimator lgbm's best error=0.0022,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:19] {2458} INFO - iteration 17, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:19] {2620} INFO - at 1.4s,\testimator lgbm's best error=0.0022,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:19] {2458} INFO - iteration 18, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:19] {2620} INFO - at 1.4s,\testimator lgbm's best error=0.0022,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:19] {2458} INFO - iteration 19, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:19] {2620} INFO - at 1.4s,\testimator lgbm's best error=0.0022,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:19] {2458} INFO - iteration 20, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:19] {2620} INFO - at 1.5s,\testimator lgbm's best error=0.0022,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:19] {2458} INFO - iteration 21, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:19] {2620} INFO - at 1.5s,\testimator lgbm's best error=0.0022,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:19] {2458} INFO - iteration 22, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:19] {2620} INFO - at 1.5s,\testimator lgbm's best error=0.0022,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:19] {2458} INFO - iteration 23, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:19] {2620} INFO - at 1.5s,\testimator lgbm's best error=0.0022,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:19] {2458} INFO - iteration 24, current learner rf\n",
- "[flaml.automl: 02-28 21:28:19] {2620} INFO - at 1.6s,\testimator rf's best error=0.0210,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:19] {2458} INFO - iteration 25, current learner rf\n",
- "[flaml.automl: 02-28 21:28:19] {2620} INFO - at 1.6s,\testimator rf's best error=0.0210,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:19] {2458} INFO - iteration 26, current learner rf\n",
- "[flaml.automl: 02-28 21:28:19] {2620} INFO - at 1.6s,\testimator rf's best error=0.0210,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:19] {2458} INFO - iteration 27, current learner rf\n",
- "[flaml.automl: 02-28 21:28:19] {2620} INFO - at 1.7s,\testimator rf's best error=0.0143,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:19] {2458} INFO - iteration 28, current learner xgboost\n",
- "[flaml.automl: 02-28 21:28:21] {2620} INFO - at 3.8s,\testimator xgboost's best error=0.6738,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:21] {2458} INFO - iteration 29, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:28:21] {2620} INFO - at 3.8s,\testimator extra_tree's best error=0.0220,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:21] {2458} INFO - iteration 30, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:22] {2620} INFO - at 3.9s,\testimator lgbm's best error=0.0022,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:22] {2458} INFO - iteration 31, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:22] {2620} INFO - at 3.9s,\testimator lgbm's best error=0.0022,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:22] {2458} INFO - iteration 32, current learner rf\n",
- "[flaml.automl: 02-28 21:28:22] {2620} INFO - at 3.9s,\testimator rf's best error=0.0102,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:22] {2458} INFO - iteration 33, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:28:22] {2620} INFO - at 4.0s,\testimator extra_tree's best error=0.0220,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:22] {2458} INFO - iteration 34, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:22] {2620} INFO - at 4.0s,\testimator lgbm's best error=0.0022,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:22] {2458} INFO - iteration 35, current learner rf\n",
- "[flaml.automl: 02-28 21:28:22] {2620} INFO - at 4.0s,\testimator rf's best error=0.0102,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:22] {2458} INFO - iteration 36, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:28:22] {2620} INFO - at 4.1s,\testimator extra_tree's best error=0.0220,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:22] {2458} INFO - iteration 37, current learner xgboost\n",
- "[flaml.automl: 02-28 21:28:22] {2620} INFO - at 4.1s,\testimator xgboost's best error=0.6738,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:22] {2458} INFO - iteration 38, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:28:22] {2620} INFO - at 4.1s,\testimator extra_tree's best error=0.0220,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:22] {2458} INFO - iteration 39, current learner rf\n",
- "[flaml.automl: 02-28 21:28:22] {2620} INFO - at 4.2s,\testimator rf's best error=0.0051,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:22] {2458} INFO - iteration 40, current learner rf\n",
- "[flaml.automl: 02-28 21:28:22] {2620} INFO - at 4.2s,\testimator rf's best error=0.0051,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:22] {2458} INFO - iteration 41, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:28:22] {2620} INFO - at 4.2s,\testimator extra_tree's best error=0.0220,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:22] {2458} INFO - iteration 42, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:22] {2620} INFO - at 4.2s,\testimator lgbm's best error=0.0022,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:22] {2458} INFO - iteration 43, current learner xgboost\n",
- "[flaml.automl: 02-28 21:28:22] {2620} INFO - at 4.3s,\testimator xgboost's best error=0.1712,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:22] {2458} INFO - iteration 44, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:28:22] {2620} INFO - at 4.3s,\testimator extra_tree's best error=0.0136,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:22] {2458} INFO - iteration 45, current learner xgboost\n",
- "[flaml.automl: 02-28 21:28:22] {2620} INFO - at 4.3s,\testimator xgboost's best error=0.0257,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:22] {2458} INFO - iteration 46, current learner xgboost\n",
- "[flaml.automl: 02-28 21:28:22] {2620} INFO - at 4.3s,\testimator xgboost's best error=0.0257,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:22] {2458} INFO - iteration 47, current learner xgboost\n",
- "[flaml.automl: 02-28 21:28:22] {2620} INFO - at 4.4s,\testimator xgboost's best error=0.0242,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:22] {2458} INFO - iteration 48, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:22] {2620} INFO - at 4.4s,\testimator lgbm's best error=0.0022,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:22] {2458} INFO - iteration 49, current learner rf\n",
- "[flaml.automl: 02-28 21:28:22] {2620} INFO - at 4.4s,\testimator rf's best error=0.0051,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:22] {2458} INFO - iteration 50, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:28:22] {2620} INFO - at 4.4s,\testimator extra_tree's best error=0.0136,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:22] {2458} INFO - iteration 51, current learner rf\n",
- "[flaml.automl: 02-28 21:28:22] {2620} INFO - at 4.5s,\testimator rf's best error=0.0032,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:22] {2458} INFO - iteration 52, current learner rf\n",
- "[flaml.automl: 02-28 21:28:22] {2620} INFO - at 4.5s,\testimator rf's best error=0.0032,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:22] {2458} INFO - iteration 53, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:22] {2620} INFO - at 4.6s,\testimator lgbm's best error=0.0022,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:22] {2458} INFO - iteration 54, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:22] {2620} INFO - at 4.6s,\testimator lgbm's best error=0.0022,\tbest estimator lgbm's best error=0.0022\n",
- "[flaml.automl: 02-28 21:28:22] {2458} INFO - iteration 55, current learner rf\n",
- "[flaml.automl: 02-28 21:28:22] {2620} INFO - at 4.6s,\testimator rf's best error=0.0020,\tbest estimator rf's best error=0.0020\n",
- "[flaml.automl: 02-28 21:28:22] {2458} INFO - iteration 56, current learner rf\n",
- "[flaml.automl: 02-28 21:28:22] {2620} INFO - at 4.7s,\testimator rf's best error=0.0020,\tbest estimator rf's best error=0.0020\n",
- "[flaml.automl: 02-28 21:28:22] {2458} INFO - iteration 57, current learner rf\n",
- "[flaml.automl: 02-28 21:28:22] {2620} INFO - at 4.7s,\testimator rf's best error=0.0020,\tbest estimator rf's best error=0.0020\n",
- "[flaml.automl: 02-28 21:28:22] {2458} INFO - iteration 58, current learner rf\n",
- "[flaml.automl: 02-28 21:28:22] {2620} INFO - at 4.7s,\testimator rf's best error=0.0020,\tbest estimator rf's best error=0.0020\n",
- "[flaml.automl: 02-28 21:28:22] {2458} INFO - iteration 59, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:22] {2620} INFO - at 4.8s,\testimator lgbm's best error=0.0022,\tbest estimator rf's best error=0.0020\n",
- "[flaml.automl: 02-28 21:28:22] {2458} INFO - iteration 60, current learner rf\n",
- "[flaml.automl: 02-28 21:28:22] {2620} INFO - at 4.8s,\testimator rf's best error=0.0018,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:22] {2458} INFO - iteration 61, current learner rf\n",
- "[flaml.automl: 02-28 21:28:22] {2620} INFO - at 4.8s,\testimator rf's best error=0.0018,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:22] {2458} INFO - iteration 62, current learner rf\n",
- "[flaml.automl: 02-28 21:28:23] {2620} INFO - at 4.9s,\testimator rf's best error=0.0018,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:23] {2458} INFO - iteration 63, current learner rf\n",
- "[flaml.automl: 02-28 21:28:23] {2620} INFO - at 4.9s,\testimator rf's best error=0.0018,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:23] {2458} INFO - iteration 64, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:23] {2620} INFO - at 4.9s,\testimator lgbm's best error=0.0022,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:23] {2458} INFO - iteration 65, current learner rf\n",
- "[flaml.automl: 02-28 21:28:23] {2620} INFO - at 5.0s,\testimator rf's best error=0.0018,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:23] {2458} INFO - iteration 66, current learner rf\n",
- "[flaml.automl: 02-28 21:28:23] {2620} INFO - at 5.0s,\testimator rf's best error=0.0018,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:23] {2458} INFO - iteration 67, current learner rf\n",
- "[flaml.automl: 02-28 21:28:23] {2620} INFO - at 5.1s,\testimator rf's best error=0.0018,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:23] {2458} INFO - iteration 68, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:23] {2620} INFO - at 5.1s,\testimator lgbm's best error=0.0022,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:23] {2458} INFO - iteration 69, current learner rf\n",
- "[flaml.automl: 02-28 21:28:23] {2620} INFO - at 5.1s,\testimator rf's best error=0.0018,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:23] {2458} INFO - iteration 70, current learner xgboost\n",
- "[flaml.automl: 02-28 21:28:23] {2620} INFO - at 5.2s,\testimator xgboost's best error=0.0242,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:23] {2458} INFO - iteration 71, current learner rf\n",
- "[flaml.automl: 02-28 21:28:23] {2620} INFO - at 5.2s,\testimator rf's best error=0.0018,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:23] {2458} INFO - iteration 72, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:28:23] {2620} INFO - at 5.2s,\testimator xgb_limitdepth's best error=0.0447,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:23] {2458} INFO - iteration 73, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:28:23] {2620} INFO - at 5.2s,\testimator xgb_limitdepth's best error=0.0447,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:23] {2458} INFO - iteration 74, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:28:23] {2620} INFO - at 5.3s,\testimator xgb_limitdepth's best error=0.0029,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:23] {2458} INFO - iteration 75, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:28:23] {2620} INFO - at 5.3s,\testimator xgb_limitdepth's best error=0.0029,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:23] {2458} INFO - iteration 76, current learner xgboost\n",
- "[flaml.automl: 02-28 21:28:23] {2620} INFO - at 5.3s,\testimator xgboost's best error=0.0242,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:23] {2458} INFO - iteration 77, current learner xgboost\n",
- "[flaml.automl: 02-28 21:28:23] {2620} INFO - at 5.4s,\testimator xgboost's best error=0.0191,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:23] {2458} INFO - iteration 78, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:28:23] {2620} INFO - at 5.4s,\testimator xgb_limitdepth's best error=0.0019,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:23] {2458} INFO - iteration 79, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:28:23] {2620} INFO - at 5.4s,\testimator xgb_limitdepth's best error=0.0019,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:23] {2458} INFO - iteration 80, current learner rf\n",
- "[flaml.automl: 02-28 21:28:23] {2620} INFO - at 5.4s,\testimator rf's best error=0.0018,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:23] {2458} INFO - iteration 81, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:28:23] {2620} INFO - at 5.5s,\testimator xgb_limitdepth's best error=0.0019,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:23] {2458} INFO - iteration 82, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:28:23] {2620} INFO - at 5.5s,\testimator xgb_limitdepth's best error=0.0019,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:23] {2458} INFO - iteration 83, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:28:23] {2620} INFO - at 5.5s,\testimator xgb_limitdepth's best error=0.0019,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:23] {2458} INFO - iteration 84, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:28:23] {2620} INFO - at 5.5s,\testimator xgb_limitdepth's best error=0.0019,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:23] {2458} INFO - iteration 85, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:28:23] {2620} INFO - at 5.6s,\testimator xgb_limitdepth's best error=0.0019,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:23] {2458} INFO - iteration 86, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:28:23] {2620} INFO - at 5.6s,\testimator xgb_limitdepth's best error=0.0019,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:23] {2458} INFO - iteration 87, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:28:23] {2620} INFO - at 5.6s,\testimator xgb_limitdepth's best error=0.0019,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:23] {2458} INFO - iteration 88, current learner xgboost\n",
- "[flaml.automl: 02-28 21:28:23] {2620} INFO - at 5.6s,\testimator xgboost's best error=0.0191,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:23] {2458} INFO - iteration 89, current learner rf\n",
- "[flaml.automl: 02-28 21:28:23] {2620} INFO - at 5.7s,\testimator rf's best error=0.0018,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:23] {2458} INFO - iteration 90, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:28:23] {2620} INFO - at 5.7s,\testimator xgb_limitdepth's best error=0.0019,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:23] {2458} INFO - iteration 91, current learner xgboost\n",
- "[flaml.automl: 02-28 21:28:23] {2620} INFO - at 5.7s,\testimator xgboost's best error=0.0103,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:23] {2458} INFO - iteration 92, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:28:23] {2620} INFO - at 5.7s,\testimator xgb_limitdepth's best error=0.0019,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:23] {2458} INFO - iteration 93, current learner xgboost\n",
- "[flaml.automl: 02-28 21:28:23] {2620} INFO - at 5.8s,\testimator xgboost's best error=0.0081,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:23] {2458} INFO - iteration 94, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:28:23] {2620} INFO - at 5.8s,\testimator extra_tree's best error=0.0074,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:23] {2458} INFO - iteration 95, current learner rf\n",
- "[flaml.automl: 02-28 21:28:23] {2620} INFO - at 5.9s,\testimator rf's best error=0.0018,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:23] {2458} INFO - iteration 96, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:28:24] {2620} INFO - at 5.9s,\testimator xgb_limitdepth's best error=0.0019,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:24] {2458} INFO - iteration 97, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:28:24] {2620} INFO - at 5.9s,\testimator extra_tree's best error=0.0074,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:24] {2458} INFO - iteration 98, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:28:24] {2620} INFO - at 5.9s,\testimator extra_tree's best error=0.0074,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:24] {2458} INFO - iteration 99, current learner xgboost\n",
- "[flaml.automl: 02-28 21:28:24] {2620} INFO - at 6.0s,\testimator xgboost's best error=0.0081,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:24] {2458} INFO - iteration 100, current learner xgboost\n",
- "[flaml.automl: 02-28 21:28:24] {2620} INFO - at 6.0s,\testimator xgboost's best error=0.0081,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:24] {2458} INFO - iteration 101, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:28:24] {2620} INFO - at 6.0s,\testimator xgb_limitdepth's best error=0.0019,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:24] {2458} INFO - iteration 102, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:28:24] {2620} INFO - at 6.1s,\testimator extra_tree's best error=0.0060,\tbest estimator rf's best error=0.0018\n",
- "[flaml.automl: 02-28 21:28:24] {2458} INFO - iteration 103, current learner prophet\n",
- "[flaml.automl: 02-28 21:28:29] {2620} INFO - at 11.5s,\testimator prophet's best error=0.0008,\tbest estimator prophet's best error=0.0008\n",
- "[flaml.automl: 02-28 21:28:29] {2458} INFO - iteration 104, current learner arima\n",
- "[flaml.automl: 02-28 21:28:30] {2620} INFO - at 12.0s,\testimator arima's best error=0.0047,\tbest estimator prophet's best error=0.0008\n",
- "[flaml.automl: 02-28 21:28:30] {2458} INFO - iteration 105, current learner sarimax\n",
- "[flaml.automl: 02-28 21:28:30] {2620} INFO - at 12.4s,\testimator sarimax's best error=0.0047,\tbest estimator prophet's best error=0.0008\n",
- "[flaml.automl: 02-28 21:28:30] {2458} INFO - iteration 106, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:30] {2620} INFO - at 12.5s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0008\n",
- "[flaml.automl: 02-28 21:28:30] {2458} INFO - iteration 107, current learner arima\n",
- "[flaml.automl: 02-28 21:28:30] {2620} INFO - at 12.5s,\testimator arima's best error=0.0047,\tbest estimator prophet's best error=0.0008\n",
- "[flaml.automl: 02-28 21:28:30] {2458} INFO - iteration 108, current learner xgboost\n",
- "[flaml.automl: 02-28 21:28:30] {2620} INFO - at 12.6s,\testimator xgboost's best error=0.0041,\tbest estimator prophet's best error=0.0008\n",
- "[flaml.automl: 02-28 21:28:30] {2458} INFO - iteration 109, current learner xgboost\n",
- "[flaml.automl: 02-28 21:28:30] {2620} INFO - at 12.7s,\testimator xgboost's best error=0.0041,\tbest estimator prophet's best error=0.0008\n",
- "[flaml.automl: 02-28 21:28:30] {2458} INFO - iteration 110, current learner sarimax\n",
- "[flaml.automl: 02-28 21:28:30] {2620} INFO - at 12.7s,\testimator sarimax's best error=0.0047,\tbest estimator prophet's best error=0.0008\n",
- "[flaml.automl: 02-28 21:28:30] {2458} INFO - iteration 111, current learner xgboost\n",
- "[flaml.automl: 02-28 21:28:30] {2620} INFO - at 12.8s,\testimator xgboost's best error=0.0029,\tbest estimator prophet's best error=0.0008\n",
- "[flaml.automl: 02-28 21:28:30] {2458} INFO - iteration 112, current learner sarimax\n",
- "[flaml.automl: 02-28 21:28:31] {2620} INFO - at 13.2s,\testimator sarimax's best error=0.0047,\tbest estimator prophet's best error=0.0008\n",
- "[flaml.automl: 02-28 21:28:31] {2458} INFO - iteration 113, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:28:31] {2620} INFO - at 13.2s,\testimator extra_tree's best error=0.0060,\tbest estimator prophet's best error=0.0008\n",
- "[flaml.automl: 02-28 21:28:31] {2458} INFO - iteration 114, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:31] {2620} INFO - at 13.3s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0008\n",
- "[flaml.automl: 02-28 21:28:31] {2458} INFO - iteration 115, current learner sarimax\n",
- "[flaml.automl: 02-28 21:28:32] {2620} INFO - at 13.9s,\testimator sarimax's best error=0.0047,\tbest estimator prophet's best error=0.0008\n",
- "[flaml.automl: 02-28 21:28:32] {2458} INFO - iteration 116, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:28:32] {2620} INFO - at 13.9s,\testimator extra_tree's best error=0.0034,\tbest estimator prophet's best error=0.0008\n",
- "[flaml.automl: 02-28 21:28:32] {2458} INFO - iteration 117, current learner sarimax\n",
- "[flaml.automl: 02-28 21:28:32] {2620} INFO - at 14.0s,\testimator sarimax's best error=0.0047,\tbest estimator prophet's best error=0.0008\n",
- "[flaml.automl: 02-28 21:28:32] {2458} INFO - iteration 118, current learner xgboost\n",
- "[flaml.automl: 02-28 21:28:32] {2620} INFO - at 14.1s,\testimator xgboost's best error=0.0029,\tbest estimator prophet's best error=0.0008\n",
- "[flaml.automl: 02-28 21:28:32] {2458} INFO - iteration 119, current learner xgboost\n",
- "[flaml.automl: 02-28 21:28:32] {2620} INFO - at 14.1s,\testimator xgboost's best error=0.0028,\tbest estimator prophet's best error=0.0008\n",
- "[flaml.automl: 02-28 21:28:32] {2458} INFO - iteration 120, current learner arima\n",
- "[flaml.automl: 02-28 21:28:32] {2620} INFO - at 14.5s,\testimator arima's best error=0.0047,\tbest estimator prophet's best error=0.0008\n",
- "[flaml.automl: 02-28 21:28:32] {2458} INFO - iteration 121, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:28:32] {2620} INFO - at 14.6s,\testimator extra_tree's best error=0.0033,\tbest estimator prophet's best error=0.0008\n",
- "[flaml.automl: 02-28 21:28:32] {2458} INFO - iteration 122, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:32] {2620} INFO - at 14.6s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0008\n",
- "[flaml.automl: 02-28 21:28:32] {2458} INFO - iteration 123, current learner arima\n",
- "[flaml.automl: 02-28 21:28:32] {2620} INFO - at 14.7s,\testimator arima's best error=0.0044,\tbest estimator prophet's best error=0.0008\n",
- "[flaml.automl: 02-28 21:28:32] {2458} INFO - iteration 124, current learner prophet\n",
- "[flaml.automl: 02-28 21:28:35] {2620} INFO - at 17.4s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:35] {2458} INFO - iteration 125, current learner sarimax\n",
- "[flaml.automl: 02-28 21:28:35] {2620} INFO - at 17.6s,\testimator sarimax's best error=0.0041,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:35] {2458} INFO - iteration 126, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:28:35] {2620} INFO - at 17.6s,\testimator extra_tree's best error=0.0032,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:35] {2458} INFO - iteration 127, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:35] {2620} INFO - at 17.6s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:35] {2458} INFO - iteration 128, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:35] {2620} INFO - at 17.6s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:35] {2458} INFO - iteration 129, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:35] {2620} INFO - at 17.7s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:35] {2458} INFO - iteration 130, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:35] {2620} INFO - at 17.7s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:35] {2458} INFO - iteration 131, current learner xgboost\n",
- "[flaml.automl: 02-28 21:28:35] {2620} INFO - at 17.8s,\testimator xgboost's best error=0.0027,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:35] {2458} INFO - iteration 132, current learner prophet\n",
- "[flaml.automl: 02-28 21:28:38] {2620} INFO - at 20.2s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:38] {2458} INFO - iteration 133, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:28:38] {2620} INFO - at 20.2s,\testimator extra_tree's best error=0.0032,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:38] {2458} INFO - iteration 134, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:28:38] {2620} INFO - at 20.3s,\testimator extra_tree's best error=0.0032,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:38] {2458} INFO - iteration 135, current learner prophet\n",
- "[flaml.automl: 02-28 21:28:40] {2620} INFO - at 22.6s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:40] {2458} INFO - iteration 136, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:40] {2620} INFO - at 22.6s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:40] {2458} INFO - iteration 137, current learner rf\n",
- "[flaml.automl: 02-28 21:28:40] {2620} INFO - at 22.6s,\testimator rf's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:40] {2458} INFO - iteration 138, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:28:40] {2620} INFO - at 22.7s,\testimator extra_tree's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:40] {2458} INFO - iteration 139, current learner prophet\n",
- "[flaml.automl: 02-28 21:28:43] {2620} INFO - at 25.4s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:43] {2458} INFO - iteration 140, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:28:43] {2620} INFO - at 25.4s,\testimator extra_tree's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:43] {2458} INFO - iteration 141, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:43] {2620} INFO - at 25.5s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:43] {2458} INFO - iteration 142, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:28:43] {2620} INFO - at 25.5s,\testimator extra_tree's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:43] {2458} INFO - iteration 143, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:28:43] {2620} INFO - at 25.5s,\testimator extra_tree's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:43] {2458} INFO - iteration 144, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:28:43] {2620} INFO - at 25.6s,\testimator extra_tree's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:43] {2458} INFO - iteration 145, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:28:43] {2620} INFO - at 25.6s,\testimator extra_tree's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:43] {2458} INFO - iteration 146, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:28:43] {2620} INFO - at 25.7s,\testimator extra_tree's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:43] {2458} INFO - iteration 147, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:28:43] {2620} INFO - at 25.7s,\testimator extra_tree's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:43] {2458} INFO - iteration 148, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:28:43] {2620} INFO - at 25.7s,\testimator extra_tree's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:43] {2458} INFO - iteration 149, current learner rf\n",
- "[flaml.automl: 02-28 21:28:43] {2620} INFO - at 25.8s,\testimator rf's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:43] {2458} INFO - iteration 150, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:43] {2620} INFO - at 25.8s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:43] {2458} INFO - iteration 151, current learner rf\n",
- "[flaml.automl: 02-28 21:28:43] {2620} INFO - at 25.8s,\testimator rf's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:43] {2458} INFO - iteration 152, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:44] {2620} INFO - at 25.9s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:44] {2458} INFO - iteration 153, current learner rf\n",
- "[flaml.automl: 02-28 21:28:44] {2620} INFO - at 25.9s,\testimator rf's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:44] {2458} INFO - iteration 154, current learner xgboost\n",
- "[flaml.automl: 02-28 21:28:44] {2620} INFO - at 25.9s,\testimator xgboost's best error=0.0027,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:44] {2458} INFO - iteration 155, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:28:44] {2620} INFO - at 26.0s,\testimator extra_tree's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:44] {2458} INFO - iteration 156, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:28:44] {2620} INFO - at 26.0s,\testimator extra_tree's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:44] {2458} INFO - iteration 157, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:44] {2620} INFO - at 26.0s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:44] {2458} INFO - iteration 158, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:44] {2620} INFO - at 26.1s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:44] {2458} INFO - iteration 159, current learner sarimax\n",
- "[flaml.automl: 02-28 21:28:44] {2620} INFO - at 26.2s,\testimator sarimax's best error=0.0040,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:44] {2458} INFO - iteration 160, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:28:44] {2620} INFO - at 26.3s,\testimator extra_tree's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:44] {2458} INFO - iteration 161, current learner xgboost\n",
- "[flaml.automl: 02-28 21:28:44] {2620} INFO - at 26.3s,\testimator xgboost's best error=0.0026,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:44] {2458} INFO - iteration 162, current learner xgboost\n",
- "[flaml.automl: 02-28 21:28:44] {2620} INFO - at 26.4s,\testimator xgboost's best error=0.0026,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:44] {2458} INFO - iteration 163, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:28:44] {2620} INFO - at 26.4s,\testimator extra_tree's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:44] {2458} INFO - iteration 164, current learner xgboost\n",
- "[flaml.automl: 02-28 21:28:44] {2620} INFO - at 26.5s,\testimator xgboost's best error=0.0026,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:44] {2458} INFO - iteration 165, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:28:44] {2620} INFO - at 26.5s,\testimator extra_tree's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:44] {2458} INFO - iteration 166, current learner sarimax\n",
- "[flaml.automl: 02-28 21:28:44] {2620} INFO - at 26.8s,\testimator sarimax's best error=0.0038,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:44] {2458} INFO - iteration 167, current learner arima\n",
- "[flaml.automl: 02-28 21:28:45] {2620} INFO - at 27.0s,\testimator arima's best error=0.0044,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:45] {2458} INFO - iteration 168, current learner rf\n",
- "[flaml.automl: 02-28 21:28:45] {2620} INFO - at 27.0s,\testimator rf's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:45] {2458} INFO - iteration 169, current learner xgboost\n",
- "[flaml.automl: 02-28 21:28:45] {2620} INFO - at 27.1s,\testimator xgboost's best error=0.0026,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:45] {2458} INFO - iteration 170, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:28:45] {2620} INFO - at 27.1s,\testimator xgb_limitdepth's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:45] {2458} INFO - iteration 171, current learner arima\n",
- "[flaml.automl: 02-28 21:28:45] {2620} INFO - at 27.3s,\testimator arima's best error=0.0043,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:45] {2458} INFO - iteration 172, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:45] {2620} INFO - at 27.3s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:45] {2458} INFO - iteration 173, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:45] {2620} INFO - at 27.3s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:45] {2458} INFO - iteration 174, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:45] {2620} INFO - at 27.3s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:45] {2458} INFO - iteration 175, current learner xgboost\n",
- "[flaml.automl: 02-28 21:28:45] {2620} INFO - at 27.4s,\testimator xgboost's best error=0.0026,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:45] {2458} INFO - iteration 176, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:28:45] {2620} INFO - at 27.4s,\testimator xgb_limitdepth's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:45] {2458} INFO - iteration 177, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:28:45] {2620} INFO - at 27.4s,\testimator extra_tree's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:45] {2458} INFO - iteration 178, current learner rf\n",
- "[flaml.automl: 02-28 21:28:45] {2620} INFO - at 27.5s,\testimator rf's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:45] {2458} INFO - iteration 179, current learner sarimax\n",
- "[flaml.automl: 02-28 21:28:46] {2620} INFO - at 28.3s,\testimator sarimax's best error=0.0038,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:46] {2458} INFO - iteration 180, current learner xgboost\n",
- "[flaml.automl: 02-28 21:28:46] {2620} INFO - at 28.4s,\testimator xgboost's best error=0.0026,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:46] {2458} INFO - iteration 181, current learner rf\n",
- "[flaml.automl: 02-28 21:28:46] {2620} INFO - at 28.4s,\testimator rf's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:46] {2458} INFO - iteration 182, current learner xgboost\n",
- "[flaml.automl: 02-28 21:28:46] {2620} INFO - at 28.5s,\testimator xgboost's best error=0.0026,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:46] {2458} INFO - iteration 183, current learner prophet\n",
- "[flaml.automl: 02-28 21:28:49] {2620} INFO - at 31.2s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:49] {2458} INFO - iteration 184, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:28:49] {2620} INFO - at 31.3s,\testimator extra_tree's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:49] {2458} INFO - iteration 185, current learner xgboost\n",
- "[flaml.automl: 02-28 21:28:49] {2620} INFO - at 31.3s,\testimator xgboost's best error=0.0026,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:49] {2458} INFO - iteration 186, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:49] {2620} INFO - at 31.4s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:49] {2458} INFO - iteration 187, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:28:49] {2620} INFO - at 31.4s,\testimator extra_tree's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:49] {2458} INFO - iteration 188, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:28:49] {2620} INFO - at 31.4s,\testimator extra_tree's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:49] {2458} INFO - iteration 189, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:49] {2620} INFO - at 31.4s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:49] {2458} INFO - iteration 190, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:28:49] {2620} INFO - at 31.5s,\testimator extra_tree's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:49] {2458} INFO - iteration 191, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:49] {2620} INFO - at 31.5s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:49] {2458} INFO - iteration 192, current learner rf\n",
- "[flaml.automl: 02-28 21:28:49] {2620} INFO - at 31.5s,\testimator rf's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:49] {2458} INFO - iteration 193, current learner prophet\n",
- "[flaml.automl: 02-28 21:28:52] {2620} INFO - at 34.8s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:52] {2458} INFO - iteration 194, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:52] {2620} INFO - at 34.8s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:52] {2458} INFO - iteration 195, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:28:52] {2620} INFO - at 34.9s,\testimator extra_tree's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:52] {2458} INFO - iteration 196, current learner xgboost\n",
- "[flaml.automl: 02-28 21:28:53] {2620} INFO - at 34.9s,\testimator xgboost's best error=0.0026,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:53] {2458} INFO - iteration 197, current learner arima\n",
- "[flaml.automl: 02-28 21:28:53] {2620} INFO - at 35.7s,\testimator arima's best error=0.0033,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:53] {2458} INFO - iteration 198, current learner rf\n",
- "[flaml.automl: 02-28 21:28:53] {2620} INFO - at 35.7s,\testimator rf's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:53] {2458} INFO - iteration 199, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:53] {2620} INFO - at 35.7s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:53] {2458} INFO - iteration 200, current learner arima\n",
- "[flaml.automl: 02-28 21:28:54] {2620} INFO - at 36.6s,\testimator arima's best error=0.0033,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:54] {2458} INFO - iteration 201, current learner prophet\n",
- "[flaml.automl: 02-28 21:28:57] {2620} INFO - at 39.2s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:57] {2458} INFO - iteration 202, current learner arima\n",
- "[flaml.automl: 02-28 21:28:57] {2620} INFO - at 39.4s,\testimator arima's best error=0.0033,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:57] {2458} INFO - iteration 203, current learner arima\n",
- "[flaml.automl: 02-28 21:28:58] {2620} INFO - at 40.0s,\testimator arima's best error=0.0033,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:58] {2458} INFO - iteration 204, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:58] {2620} INFO - at 40.0s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:58] {2458} INFO - iteration 205, current learner lgbm\n",
- "[flaml.automl: 02-28 21:28:58] {2620} INFO - at 40.0s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:58] {2458} INFO - iteration 206, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:28:58] {2620} INFO - at 40.0s,\testimator extra_tree's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:58] {2458} INFO - iteration 207, current learner arima\n",
- "[flaml.automl: 02-28 21:28:58] {2620} INFO - at 40.8s,\testimator arima's best error=0.0033,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:58] {2458} INFO - iteration 208, current learner arima\n",
- "[flaml.automl: 02-28 21:28:59] {2620} INFO - at 41.0s,\testimator arima's best error=0.0033,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:59] {2458} INFO - iteration 209, current learner xgboost\n",
- "[flaml.automl: 02-28 21:28:59] {2620} INFO - at 41.1s,\testimator xgboost's best error=0.0025,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:28:59] {2458} INFO - iteration 210, current learner prophet\n",
- "[flaml.automl: 02-28 21:29:01] {2620} INFO - at 43.6s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:01] {2458} INFO - iteration 211, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:29:01] {2620} INFO - at 43.7s,\testimator extra_tree's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:01] {2458} INFO - iteration 212, current learner rf\n",
- "[flaml.automl: 02-28 21:29:01] {2620} INFO - at 43.7s,\testimator rf's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:01] {2458} INFO - iteration 213, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:29:01] {2620} INFO - at 43.8s,\testimator extra_tree's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:01] {2458} INFO - iteration 214, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:29:01] {2620} INFO - at 43.8s,\testimator extra_tree's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:01] {2458} INFO - iteration 215, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:29:01] {2620} INFO - at 43.8s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:01] {2458} INFO - iteration 216, current learner prophet\n",
- "[flaml.automl: 02-28 21:29:04] {2620} INFO - at 46.5s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:04] {2458} INFO - iteration 217, current learner prophet\n",
- "[flaml.automl: 02-28 21:29:07] {2620} INFO - at 49.6s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:07] {2458} INFO - iteration 218, current learner xgboost\n",
- "[flaml.automl: 02-28 21:29:07] {2620} INFO - at 49.8s,\testimator xgboost's best error=0.0025,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:07] {2458} INFO - iteration 219, current learner sarimax\n",
- "[flaml.automl: 02-28 21:29:08] {2620} INFO - at 49.9s,\testimator sarimax's best error=0.0038,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:08] {2458} INFO - iteration 220, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:08] {2620} INFO - at 50.0s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:08] {2458} INFO - iteration 221, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:08] {2620} INFO - at 50.0s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:08] {2458} INFO - iteration 222, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:08] {2620} INFO - at 50.0s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:08] {2458} INFO - iteration 223, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:29:08] {2620} INFO - at 50.1s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:08] {2458} INFO - iteration 224, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:08] {2620} INFO - at 50.1s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:08] {2458} INFO - iteration 225, current learner prophet\n",
- "[flaml.automl: 02-28 21:29:11] {2620} INFO - at 53.4s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:11] {2458} INFO - iteration 226, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:29:11] {2620} INFO - at 53.4s,\testimator xgb_limitdepth's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:11] {2458} INFO - iteration 227, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:11] {2620} INFO - at 53.4s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:11] {2458} INFO - iteration 228, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:11] {2620} INFO - at 53.4s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:11] {2458} INFO - iteration 229, current learner rf\n",
- "[flaml.automl: 02-28 21:29:11] {2620} INFO - at 53.5s,\testimator rf's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:11] {2458} INFO - iteration 230, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:11] {2620} INFO - at 53.5s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:11] {2458} INFO - iteration 231, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:29:11] {2620} INFO - at 53.5s,\testimator xgb_limitdepth's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:11] {2458} INFO - iteration 232, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:29:11] {2620} INFO - at 53.6s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:11] {2458} INFO - iteration 233, current learner prophet\n",
- "[flaml.automl: 02-28 21:29:14] {2620} INFO - at 56.8s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:14] {2458} INFO - iteration 234, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:29:15] {2620} INFO - at 56.9s,\testimator xgb_limitdepth's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:15] {2458} INFO - iteration 235, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:29:15] {2620} INFO - at 56.9s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:15] {2458} INFO - iteration 236, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:29:15] {2620} INFO - at 56.9s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:15] {2458} INFO - iteration 237, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:29:15] {2620} INFO - at 57.0s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:15] {2458} INFO - iteration 238, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:29:15] {2620} INFO - at 57.0s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:15] {2458} INFO - iteration 239, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:29:15] {2620} INFO - at 57.0s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:15] {2458} INFO - iteration 240, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:29:15] {2620} INFO - at 57.0s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:15] {2458} INFO - iteration 241, current learner rf\n",
- "[flaml.automl: 02-28 21:29:15] {2620} INFO - at 57.1s,\testimator rf's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:15] {2458} INFO - iteration 242, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:29:15] {2620} INFO - at 57.2s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:15] {2458} INFO - iteration 243, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:29:15] {2620} INFO - at 57.2s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:15] {2458} INFO - iteration 244, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:15] {2620} INFO - at 57.2s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:15] {2458} INFO - iteration 245, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:29:15] {2620} INFO - at 57.3s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:15] {2458} INFO - iteration 246, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:29:15] {2620} INFO - at 57.3s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:15] {2458} INFO - iteration 247, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:29:15] {2620} INFO - at 57.3s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:15] {2458} INFO - iteration 248, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:15] {2620} INFO - at 57.4s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:15] {2458} INFO - iteration 249, current learner prophet\n",
- "[flaml.automl: 02-28 21:29:19] {2620} INFO - at 61.3s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:19] {2458} INFO - iteration 250, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:29:19] {2620} INFO - at 61.4s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:19] {2458} INFO - iteration 251, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:19] {2620} INFO - at 61.4s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:19] {2458} INFO - iteration 252, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:29:19] {2620} INFO - at 61.4s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:19] {2458} INFO - iteration 253, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:29:19] {2620} INFO - at 61.5s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:19] {2458} INFO - iteration 254, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:19] {2620} INFO - at 61.5s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:19] {2458} INFO - iteration 255, current learner rf\n",
- "[flaml.automl: 02-28 21:29:19] {2620} INFO - at 61.5s,\testimator rf's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:19] {2458} INFO - iteration 256, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:29:19] {2620} INFO - at 61.6s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:19] {2458} INFO - iteration 257, current learner prophet\n",
- "[flaml.automl: 02-28 21:29:22] {2620} INFO - at 64.5s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:22] {2458} INFO - iteration 258, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:22] {2620} INFO - at 64.5s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:22] {2458} INFO - iteration 259, current learner sarimax\n",
- "[flaml.automl: 02-28 21:29:22] {2620} INFO - at 64.7s,\testimator sarimax's best error=0.0037,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:22] {2458} INFO - iteration 260, current learner rf\n",
- "[flaml.automl: 02-28 21:29:22] {2620} INFO - at 64.7s,\testimator rf's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:22] {2458} INFO - iteration 261, current learner sarimax\n",
- "[flaml.automl: 02-28 21:29:23] {2620} INFO - at 64.9s,\testimator sarimax's best error=0.0037,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:23] {2458} INFO - iteration 262, current learner xgboost\n",
- "[flaml.automl: 02-28 21:29:23] {2620} INFO - at 64.9s,\testimator xgboost's best error=0.0025,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:23] {2458} INFO - iteration 263, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:29:23] {2620} INFO - at 64.9s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:23] {2458} INFO - iteration 264, current learner xgboost\n",
- "[flaml.automl: 02-28 21:29:23] {2620} INFO - at 65.1s,\testimator xgboost's best error=0.0025,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:23] {2458} INFO - iteration 265, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:29:23] {2620} INFO - at 65.1s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:23] {2458} INFO - iteration 266, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:23] {2620} INFO - at 65.1s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:23] {2458} INFO - iteration 267, current learner xgboost\n",
- "[flaml.automl: 02-28 21:29:23] {2620} INFO - at 65.3s,\testimator xgboost's best error=0.0025,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:23] {2458} INFO - iteration 268, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:29:23] {2620} INFO - at 65.3s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:23] {2458} INFO - iteration 269, current learner prophet\n",
- "[flaml.automl: 02-28 21:29:26] {2620} INFO - at 68.8s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:26] {2458} INFO - iteration 270, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:29:26] {2620} INFO - at 68.8s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:26] {2458} INFO - iteration 271, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:29:26] {2620} INFO - at 68.8s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:26] {2458} INFO - iteration 272, current learner prophet\n",
- "[flaml.automl: 02-28 21:29:29] {2620} INFO - at 71.8s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:29] {2458} INFO - iteration 273, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:29] {2620} INFO - at 71.8s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:29] {2458} INFO - iteration 274, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:29:29] {2620} INFO - at 71.8s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:29] {2458} INFO - iteration 275, current learner rf\n",
- "[flaml.automl: 02-28 21:29:30] {2620} INFO - at 71.9s,\testimator rf's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:30] {2458} INFO - iteration 276, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:30] {2620} INFO - at 71.9s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:30] {2458} INFO - iteration 277, current learner rf\n",
- "[flaml.automl: 02-28 21:29:30] {2620} INFO - at 72.0s,\testimator rf's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:30] {2458} INFO - iteration 278, current learner rf\n",
- "[flaml.automl: 02-28 21:29:30] {2620} INFO - at 72.0s,\testimator rf's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:30] {2458} INFO - iteration 279, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:30] {2620} INFO - at 72.0s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:30] {2458} INFO - iteration 280, current learner xgboost\n",
- "[flaml.automl: 02-28 21:29:30] {2620} INFO - at 72.2s,\testimator xgboost's best error=0.0025,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:30] {2458} INFO - iteration 281, current learner rf\n",
- "[flaml.automl: 02-28 21:29:30] {2620} INFO - at 72.2s,\testimator rf's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:30] {2458} INFO - iteration 282, current learner sarimax\n",
- "[flaml.automl: 02-28 21:29:30] {2620} INFO - at 72.6s,\testimator sarimax's best error=0.0037,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:30] {2458} INFO - iteration 283, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:29:30] {2620} INFO - at 72.7s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:30] {2458} INFO - iteration 284, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:30] {2620} INFO - at 72.7s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:30] {2458} INFO - iteration 285, current learner rf\n",
- "[flaml.automl: 02-28 21:29:30] {2620} INFO - at 72.7s,\testimator rf's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:30] {2458} INFO - iteration 286, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:30] {2620} INFO - at 72.8s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:30] {2458} INFO - iteration 287, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:29:30] {2620} INFO - at 72.8s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:30] {2458} INFO - iteration 288, current learner prophet\n",
- "[flaml.automl: 02-28 21:29:33] {2620} INFO - at 75.7s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:33] {2458} INFO - iteration 289, current learner prophet\n",
- "[flaml.automl: 02-28 21:29:36] {2620} INFO - at 78.6s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:36] {2458} INFO - iteration 290, current learner xgboost\n",
- "[flaml.automl: 02-28 21:29:36] {2620} INFO - at 78.6s,\testimator xgboost's best error=0.0025,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:36] {2458} INFO - iteration 291, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:36] {2620} INFO - at 78.7s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:36] {2458} INFO - iteration 292, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:36] {2620} INFO - at 78.7s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:36] {2458} INFO - iteration 293, current learner prophet\n",
- "[flaml.automl: 02-28 21:29:39] {2620} INFO - at 81.7s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:39] {2458} INFO - iteration 294, current learner rf\n",
- "[flaml.automl: 02-28 21:29:39] {2620} INFO - at 81.7s,\testimator rf's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:39] {2458} INFO - iteration 295, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:29:39] {2620} INFO - at 81.7s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:39] {2458} INFO - iteration 296, current learner rf\n",
- "[flaml.automl: 02-28 21:29:39] {2620} INFO - at 81.8s,\testimator rf's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:39] {2458} INFO - iteration 297, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:29:39] {2620} INFO - at 81.8s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:39] {2458} INFO - iteration 298, current learner rf\n",
- "[flaml.automl: 02-28 21:29:40] {2620} INFO - at 81.9s,\testimator rf's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:40] {2458} INFO - iteration 299, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:29:40] {2620} INFO - at 81.9s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:40] {2458} INFO - iteration 300, current learner rf\n",
- "[flaml.automl: 02-28 21:29:40] {2620} INFO - at 81.9s,\testimator rf's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:40] {2458} INFO - iteration 301, current learner rf\n",
- "[flaml.automl: 02-28 21:29:40] {2620} INFO - at 82.0s,\testimator rf's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:40] {2458} INFO - iteration 302, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:40] {2620} INFO - at 82.0s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:40] {2458} INFO - iteration 303, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:40] {2620} INFO - at 82.0s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:40] {2458} INFO - iteration 304, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:40] {2620} INFO - at 82.1s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:40] {2458} INFO - iteration 305, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:40] {2620} INFO - at 82.1s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:40] {2458} INFO - iteration 306, current learner sarimax\n",
- "[flaml.automl: 02-28 21:29:40] {2620} INFO - at 82.2s,\testimator sarimax's best error=0.0037,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:40] {2458} INFO - iteration 307, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:40] {2620} INFO - at 82.2s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:40] {2458} INFO - iteration 308, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:29:40] {2620} INFO - at 82.2s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:40] {2458} INFO - iteration 309, current learner prophet\n",
- "[flaml.automl: 02-28 21:29:43] {2620} INFO - at 85.1s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:43] {2458} INFO - iteration 310, current learner xgboost\n",
- "[flaml.automl: 02-28 21:29:43] {2620} INFO - at 85.2s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:43] {2458} INFO - iteration 311, current learner xgboost\n",
- "[flaml.automl: 02-28 21:29:43] {2620} INFO - at 85.3s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:43] {2458} INFO - iteration 312, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:43] {2620} INFO - at 85.3s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:43] {2458} INFO - iteration 313, current learner rf\n",
- "[flaml.automl: 02-28 21:29:43] {2620} INFO - at 85.4s,\testimator rf's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:43] {2458} INFO - iteration 314, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:43] {2620} INFO - at 85.4s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:43] {2458} INFO - iteration 315, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:43] {2620} INFO - at 85.4s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:43] {2458} INFO - iteration 316, current learner xgboost\n",
- "[flaml.automl: 02-28 21:29:43] {2620} INFO - at 85.5s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:43] {2458} INFO - iteration 317, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:43] {2620} INFO - at 85.5s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:43] {2458} INFO - iteration 318, current learner prophet\n",
- "[flaml.automl: 02-28 21:29:46] {2620} INFO - at 88.4s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:46] {2458} INFO - iteration 319, current learner prophet\n",
- "[flaml.automl: 02-28 21:29:49] {2620} INFO - at 91.3s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:49] {2458} INFO - iteration 320, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:49] {2620} INFO - at 91.3s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:49] {2458} INFO - iteration 321, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:49] {2620} INFO - at 91.3s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:49] {2458} INFO - iteration 322, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:49] {2620} INFO - at 91.4s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:49] {2458} INFO - iteration 323, current learner rf\n",
- "[flaml.automl: 02-28 21:29:49] {2620} INFO - at 91.4s,\testimator rf's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:49] {2458} INFO - iteration 324, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:29:49] {2620} INFO - at 91.5s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:49] {2458} INFO - iteration 325, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:49] {2620} INFO - at 91.5s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:49] {2458} INFO - iteration 326, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:49] {2620} INFO - at 91.5s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:49] {2458} INFO - iteration 327, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:29:49] {2620} INFO - at 91.6s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:49] {2458} INFO - iteration 328, current learner rf\n",
- "[flaml.automl: 02-28 21:29:49] {2620} INFO - at 91.6s,\testimator rf's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:49] {2458} INFO - iteration 329, current learner xgboost\n",
- "[flaml.automl: 02-28 21:29:49] {2620} INFO - at 91.7s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:49] {2458} INFO - iteration 330, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:49] {2620} INFO - at 91.7s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:49] {2458} INFO - iteration 331, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:49] {2620} INFO - at 91.7s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:49] {2458} INFO - iteration 332, current learner rf\n",
- "[flaml.automl: 02-28 21:29:49] {2620} INFO - at 91.8s,\testimator rf's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:49] {2458} INFO - iteration 333, current learner rf\n",
- "[flaml.automl: 02-28 21:29:49] {2620} INFO - at 91.8s,\testimator rf's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:49] {2458} INFO - iteration 334, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:50] {2620} INFO - at 91.9s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:50] {2458} INFO - iteration 335, current learner xgboost\n",
- "[flaml.automl: 02-28 21:29:50] {2620} INFO - at 91.9s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:50] {2458} INFO - iteration 336, current learner xgboost\n",
- "[flaml.automl: 02-28 21:29:50] {2620} INFO - at 92.1s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:50] {2458} INFO - iteration 337, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:29:50] {2620} INFO - at 92.1s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:50] {2458} INFO - iteration 338, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:50] {2620} INFO - at 92.1s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:50] {2458} INFO - iteration 339, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:29:50] {2620} INFO - at 92.2s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:50] {2458} INFO - iteration 340, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:50] {2620} INFO - at 92.2s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:50] {2458} INFO - iteration 341, current learner rf\n",
- "[flaml.automl: 02-28 21:29:50] {2620} INFO - at 92.3s,\testimator rf's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:50] {2458} INFO - iteration 342, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:29:50] {2620} INFO - at 92.3s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:50] {2458} INFO - iteration 343, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:50] {2620} INFO - at 92.3s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:50] {2458} INFO - iteration 344, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:50] {2620} INFO - at 92.4s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:50] {2458} INFO - iteration 345, current learner prophet\n",
- "[flaml.automl: 02-28 21:29:54] {2620} INFO - at 96.0s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:54] {2458} INFO - iteration 346, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:29:54] {2620} INFO - at 96.0s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:54] {2458} INFO - iteration 347, current learner xgboost\n",
- "[flaml.automl: 02-28 21:29:54] {2620} INFO - at 96.1s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:54] {2458} INFO - iteration 348, current learner prophet\n",
- "[flaml.automl: 02-28 21:29:57] {2620} INFO - at 99.6s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:57] {2458} INFO - iteration 349, current learner arima\n",
- "[flaml.automl: 02-28 21:29:58] {2620} INFO - at 100.2s,\testimator arima's best error=0.0033,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:58] {2458} INFO - iteration 350, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:29:58] {2620} INFO - at 100.3s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:58] {2458} INFO - iteration 351, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:29:58] {2620} INFO - at 100.3s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:58] {2458} INFO - iteration 352, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:58] {2620} INFO - at 100.3s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:58] {2458} INFO - iteration 353, current learner xgboost\n",
- "[flaml.automl: 02-28 21:29:58] {2620} INFO - at 100.4s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:58] {2458} INFO - iteration 354, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:29:58] {2620} INFO - at 100.5s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:58] {2458} INFO - iteration 355, current learner xgboost\n",
- "[flaml.automl: 02-28 21:29:58] {2620} INFO - at 100.6s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:58] {2458} INFO - iteration 356, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:29:58] {2620} INFO - at 100.7s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:58] {2458} INFO - iteration 357, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:58] {2620} INFO - at 100.7s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:58] {2458} INFO - iteration 358, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:29:58] {2620} INFO - at 100.8s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:58] {2458} INFO - iteration 359, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:29:58] {2620} INFO - at 100.9s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:58] {2458} INFO - iteration 360, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:59] {2620} INFO - at 100.9s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:59] {2458} INFO - iteration 361, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:59] {2620} INFO - at 100.9s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:59] {2458} INFO - iteration 362, current learner xgboost\n",
- "[flaml.automl: 02-28 21:29:59] {2620} INFO - at 101.1s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:59] {2458} INFO - iteration 363, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:59] {2620} INFO - at 101.1s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:59] {2458} INFO - iteration 364, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:29:59] {2620} INFO - at 101.1s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:59] {2458} INFO - iteration 365, current learner rf\n",
- "[flaml.automl: 02-28 21:29:59] {2620} INFO - at 101.2s,\testimator rf's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:59] {2458} INFO - iteration 366, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:29:59] {2620} INFO - at 101.2s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:59] {2458} INFO - iteration 367, current learner sarimax\n",
- "[flaml.automl: 02-28 21:29:59] {2620} INFO - at 101.4s,\testimator sarimax's best error=0.0037,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:59] {2458} INFO - iteration 368, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:59] {2620} INFO - at 101.4s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:59] {2458} INFO - iteration 369, current learner lgbm\n",
- "[flaml.automl: 02-28 21:29:59] {2620} INFO - at 101.4s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:29:59] {2458} INFO - iteration 370, current learner prophet\n",
- "[flaml.automl: 02-28 21:30:02] {2620} INFO - at 104.6s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:02] {2458} INFO - iteration 371, current learner rf\n",
- "[flaml.automl: 02-28 21:30:02] {2620} INFO - at 104.7s,\testimator rf's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:02] {2458} INFO - iteration 372, current learner sarimax\n",
- "[flaml.automl: 02-28 21:30:03] {2620} INFO - at 105.0s,\testimator sarimax's best error=0.0031,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:03] {2458} INFO - iteration 373, current learner sarimax\n",
- "[flaml.automl: 02-28 21:30:03] {2620} INFO - at 105.7s,\testimator sarimax's best error=0.0031,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:03] {2458} INFO - iteration 374, current learner sarimax\n",
- "[flaml.automl: 02-28 21:30:04] {2620} INFO - at 105.9s,\testimator sarimax's best error=0.0031,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:04] {2458} INFO - iteration 375, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:30:04] {2620} INFO - at 105.9s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:04] {2458} INFO - iteration 376, current learner xgboost\n",
- "[flaml.automl: 02-28 21:30:04] {2620} INFO - at 106.0s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:04] {2458} INFO - iteration 377, current learner lgbm\n",
- "[flaml.automl: 02-28 21:30:04] {2620} INFO - at 106.1s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:04] {2458} INFO - iteration 378, current learner rf\n",
- "[flaml.automl: 02-28 21:30:04] {2620} INFO - at 106.1s,\testimator rf's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:04] {2458} INFO - iteration 379, current learner lgbm\n",
- "[flaml.automl: 02-28 21:30:04] {2620} INFO - at 106.2s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:04] {2458} INFO - iteration 380, current learner lgbm\n",
- "[flaml.automl: 02-28 21:30:04] {2620} INFO - at 106.2s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:04] {2458} INFO - iteration 381, current learner sarimax\n",
- "[flaml.automl: 02-28 21:30:04] {2620} INFO - at 106.7s,\testimator sarimax's best error=0.0031,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:04] {2458} INFO - iteration 382, current learner lgbm\n",
- "[flaml.automl: 02-28 21:30:04] {2620} INFO - at 106.7s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:04] {2458} INFO - iteration 383, current learner lgbm\n",
- "[flaml.automl: 02-28 21:30:04] {2620} INFO - at 106.8s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:04] {2458} INFO - iteration 384, current learner prophet\n",
- "[flaml.automl: 02-28 21:30:07] {2620} INFO - at 109.6s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:07] {2458} INFO - iteration 385, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:30:07] {2620} INFO - at 109.6s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:07] {2458} INFO - iteration 386, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:30:07] {2620} INFO - at 109.7s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:07] {2458} INFO - iteration 387, current learner lgbm\n",
- "[flaml.automl: 02-28 21:30:07] {2620} INFO - at 109.7s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:07] {2458} INFO - iteration 388, current learner rf\n",
- "[flaml.automl: 02-28 21:30:07] {2620} INFO - at 109.8s,\testimator rf's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:07] {2458} INFO - iteration 389, current learner xgboost\n",
- "[flaml.automl: 02-28 21:30:07] {2620} INFO - at 109.8s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:07] {2458} INFO - iteration 390, current learner sarimax\n",
- "[flaml.automl: 02-28 21:30:08] {2620} INFO - at 110.6s,\testimator sarimax's best error=0.0021,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:08] {2458} INFO - iteration 391, current learner xgboost\n",
- "[flaml.automl: 02-28 21:30:08] {2620} INFO - at 110.7s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:08] {2458} INFO - iteration 392, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:30:08] {2620} INFO - at 110.7s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:08] {2458} INFO - iteration 393, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:30:08] {2620} INFO - at 110.7s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:08] {2458} INFO - iteration 394, current learner sarimax\n",
- "[flaml.automl: 02-28 21:30:09] {2620} INFO - at 111.2s,\testimator sarimax's best error=0.0021,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:09] {2458} INFO - iteration 395, current learner sarimax\n",
- "[flaml.automl: 02-28 21:30:11] {2620} INFO - at 113.0s,\testimator sarimax's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:11] {2458} INFO - iteration 396, current learner lgbm\n",
- "[flaml.automl: 02-28 21:30:11] {2620} INFO - at 113.0s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:11] {2458} INFO - iteration 397, current learner xgboost\n",
- "[flaml.automl: 02-28 21:30:11] {2620} INFO - at 113.1s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:11] {2458} INFO - iteration 398, current learner lgbm\n",
- "[flaml.automl: 02-28 21:30:11] {2620} INFO - at 113.1s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:11] {2458} INFO - iteration 399, current learner prophet\n",
- "[flaml.automl: 02-28 21:30:14] {2620} INFO - at 115.9s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:14] {2458} INFO - iteration 400, current learner lgbm\n",
- "[flaml.automl: 02-28 21:30:14] {2620} INFO - at 115.9s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:14] {2458} INFO - iteration 401, current learner rf\n",
- "[flaml.automl: 02-28 21:30:14] {2620} INFO - at 116.0s,\testimator rf's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:14] {2458} INFO - iteration 402, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:30:14] {2620} INFO - at 116.0s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:14] {2458} INFO - iteration 403, current learner sarimax\n",
- "[flaml.automl: 02-28 21:30:15] {2620} INFO - at 117.8s,\testimator sarimax's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:15] {2458} INFO - iteration 404, current learner xgboost\n",
- "[flaml.automl: 02-28 21:30:16] {2620} INFO - at 117.9s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:16] {2458} INFO - iteration 405, current learner prophet\n",
- "[flaml.automl: 02-28 21:30:19] {2620} INFO - at 121.0s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:19] {2458} INFO - iteration 406, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:30:19] {2620} INFO - at 121.0s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:19] {2458} INFO - iteration 407, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:30:19] {2620} INFO - at 121.1s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:19] {2458} INFO - iteration 408, current learner sarimax\n",
- "[flaml.automl: 02-28 21:30:20] {2620} INFO - at 122.5s,\testimator sarimax's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:20] {2458} INFO - iteration 409, current learner prophet\n",
- "[flaml.automl: 02-28 21:30:23] {2620} INFO - at 125.3s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:23] {2458} INFO - iteration 410, current learner rf\n",
- "[flaml.automl: 02-28 21:30:23] {2620} INFO - at 125.3s,\testimator rf's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:23] {2458} INFO - iteration 411, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:30:23] {2620} INFO - at 125.4s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:23] {2458} INFO - iteration 412, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:30:23] {2620} INFO - at 125.4s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:23] {2458} INFO - iteration 413, current learner sarimax\n",
- "[flaml.automl: 02-28 21:30:25] {2620} INFO - at 127.2s,\testimator sarimax's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:25] {2458} INFO - iteration 414, current learner rf\n",
- "[flaml.automl: 02-28 21:30:25] {2620} INFO - at 127.3s,\testimator rf's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:25] {2458} INFO - iteration 415, current learner rf\n",
- "[flaml.automl: 02-28 21:30:25] {2620} INFO - at 127.3s,\testimator rf's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:25] {2458} INFO - iteration 416, current learner rf\n",
- "[flaml.automl: 02-28 21:30:25] {2620} INFO - at 127.4s,\testimator rf's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:25] {2458} INFO - iteration 417, current learner xgboost\n",
- "[flaml.automl: 02-28 21:30:25] {2620} INFO - at 127.4s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:25] {2458} INFO - iteration 418, current learner rf\n",
- "[flaml.automl: 02-28 21:30:25] {2620} INFO - at 127.5s,\testimator rf's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:25] {2458} INFO - iteration 419, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:30:25] {2620} INFO - at 127.5s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:25] {2458} INFO - iteration 420, current learner prophet\n",
- "[flaml.automl: 02-28 21:30:29] {2620} INFO - at 130.9s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:29] {2458} INFO - iteration 421, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:30:29] {2620} INFO - at 130.9s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:29] {2458} INFO - iteration 422, current learner prophet\n",
- "[flaml.automl: 02-28 21:30:31] {2620} INFO - at 133.8s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:31] {2458} INFO - iteration 423, current learner xgboost\n",
- "[flaml.automl: 02-28 21:30:32] {2620} INFO - at 133.9s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:32] {2458} INFO - iteration 424, current learner prophet\n",
- "[flaml.automl: 02-28 21:30:34] {2620} INFO - at 136.6s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:34] {2458} INFO - iteration 425, current learner xgboost\n",
- "[flaml.automl: 02-28 21:30:34] {2620} INFO - at 136.6s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:34] {2458} INFO - iteration 426, current learner prophet\n",
- "[flaml.automl: 02-28 21:30:38] {2620} INFO - at 140.0s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:38] {2458} INFO - iteration 427, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:30:38] {2620} INFO - at 140.0s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:38] {2458} INFO - iteration 428, current learner rf\n",
- "[flaml.automl: 02-28 21:30:38] {2620} INFO - at 140.1s,\testimator rf's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:38] {2458} INFO - iteration 429, current learner rf\n",
- "[flaml.automl: 02-28 21:30:38] {2620} INFO - at 140.1s,\testimator rf's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:38] {2458} INFO - iteration 430, current learner rf\n",
- "[flaml.automl: 02-28 21:30:38] {2620} INFO - at 140.2s,\testimator rf's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:38] {2458} INFO - iteration 431, current learner rf\n",
- "[flaml.automl: 02-28 21:30:38] {2620} INFO - at 140.2s,\testimator rf's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:38] {2458} INFO - iteration 432, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:30:38] {2620} INFO - at 140.3s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:38] {2458} INFO - iteration 433, current learner rf\n",
- "[flaml.automl: 02-28 21:30:38] {2620} INFO - at 140.3s,\testimator rf's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:38] {2458} INFO - iteration 434, current learner rf\n",
- "[flaml.automl: 02-28 21:30:38] {2620} INFO - at 140.4s,\testimator rf's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:38] {2458} INFO - iteration 435, current learner prophet\n",
- "[flaml.automl: 02-28 21:30:41] {2620} INFO - at 143.4s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:41] {2458} INFO - iteration 436, current learner xgboost\n",
- "[flaml.automl: 02-28 21:30:41] {2620} INFO - at 143.5s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:41] {2458} INFO - iteration 437, current learner prophet\n",
- "[flaml.automl: 02-28 21:30:45] {2620} INFO - at 146.9s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:45] {2458} INFO - iteration 438, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:30:45] {2620} INFO - at 146.9s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:45] {2458} INFO - iteration 439, current learner xgboost\n",
- "[flaml.automl: 02-28 21:30:45] {2620} INFO - at 146.9s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:45] {2458} INFO - iteration 440, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:30:45] {2620} INFO - at 147.0s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:45] {2458} INFO - iteration 441, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:30:45] {2620} INFO - at 147.0s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:45] {2458} INFO - iteration 442, current learner prophet\n",
- "[flaml.automl: 02-28 21:30:48] {2620} INFO - at 150.1s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:48] {2458} INFO - iteration 443, current learner xgboost\n",
- "[flaml.automl: 02-28 21:30:48] {2620} INFO - at 150.2s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:48] {2458} INFO - iteration 444, current learner sarimax\n",
- "[flaml.automl: 02-28 21:30:49] {2620} INFO - at 151.2s,\testimator sarimax's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:49] {2458} INFO - iteration 445, current learner rf\n",
- "[flaml.automl: 02-28 21:30:49] {2620} INFO - at 151.3s,\testimator rf's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:49] {2458} INFO - iteration 446, current learner prophet\n",
- "[flaml.automl: 02-28 21:30:52] {2620} INFO - at 154.3s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:52] {2458} INFO - iteration 447, current learner rf\n",
- "[flaml.automl: 02-28 21:30:52] {2620} INFO - at 154.3s,\testimator rf's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:52] {2458} INFO - iteration 448, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:30:52] {2620} INFO - at 154.3s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:52] {2458} INFO - iteration 449, current learner lgbm\n",
- "[flaml.automl: 02-28 21:30:52] {2620} INFO - at 154.4s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:52] {2458} INFO - iteration 450, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:30:52] {2620} INFO - at 154.4s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:52] {2458} INFO - iteration 451, current learner lgbm\n",
- "[flaml.automl: 02-28 21:30:52] {2620} INFO - at 154.4s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:52] {2458} INFO - iteration 452, current learner sarimax\n",
- "[flaml.automl: 02-28 21:30:55] {2620} INFO - at 157.4s,\testimator sarimax's best error=0.0012,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:55] {2458} INFO - iteration 453, current learner sarimax\n",
- "[flaml.automl: 02-28 21:30:57] {2620} INFO - at 159.0s,\testimator sarimax's best error=0.0012,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:57] {2458} INFO - iteration 454, current learner sarimax\n",
- "[flaml.automl: 02-28 21:30:59] {2620} INFO - at 160.9s,\testimator sarimax's best error=0.0012,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:30:59] {2458} INFO - iteration 455, current learner sarimax\n",
- "[flaml.automl: 02-28 21:31:00] {2620} INFO - at 161.9s,\testimator sarimax's best error=0.0012,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:31:00] {2458} INFO - iteration 456, current learner rf\n",
- "[flaml.automl: 02-28 21:31:00] {2620} INFO - at 162.0s,\testimator rf's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:31:00] {2458} INFO - iteration 457, current learner rf\n",
- "[flaml.automl: 02-28 21:31:00] {2620} INFO - at 162.0s,\testimator rf's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:31:00] {2458} INFO - iteration 458, current learner rf\n",
- "[flaml.automl: 02-28 21:31:00] {2620} INFO - at 162.1s,\testimator rf's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:31:00] {2458} INFO - iteration 459, current learner arima\n",
- "[flaml.automl: 02-28 21:31:00] {2620} INFO - at 162.2s,\testimator arima's best error=0.0033,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:31:00] {2458} INFO - iteration 460, current learner sarimax\n",
- "[flaml.automl: 02-28 21:31:01] {2620} INFO - at 163.7s,\testimator sarimax's best error=0.0012,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:31:01] {2458} INFO - iteration 461, current learner sarimax\n",
- "[flaml.automl: 02-28 21:31:02] {2620} INFO - at 164.6s,\testimator sarimax's best error=0.0010,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:31:02] {2458} INFO - iteration 462, current learner sarimax\n",
- "[flaml.automl: 02-28 21:31:03] {2620} INFO - at 165.7s,\testimator sarimax's best error=0.0007,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:31:03] {2458} INFO - iteration 463, current learner sarimax\n",
- "[flaml.automl: 02-28 21:31:05] {2620} INFO - at 166.9s,\testimator sarimax's best error=0.0007,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:31:05] {2458} INFO - iteration 464, current learner sarimax\n",
- "[flaml.automl: 02-28 21:31:05] {2620} INFO - at 167.4s,\testimator sarimax's best error=0.0007,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:31:05] {2458} INFO - iteration 465, current learner sarimax\n",
- "[flaml.automl: 02-28 21:31:08] {2620} INFO - at 170.0s,\testimator sarimax's best error=0.0007,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:31:08] {2458} INFO - iteration 466, current learner prophet\n",
- "[flaml.automl: 02-28 21:31:10] {2620} INFO - at 172.8s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:31:10] {2458} INFO - iteration 467, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:31:10] {2620} INFO - at 172.8s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:31:10] {2458} INFO - iteration 468, current learner xgboost\n",
- "[flaml.automl: 02-28 21:31:11] {2620} INFO - at 172.9s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:31:11] {2458} INFO - iteration 469, current learner sarimax\n",
- "[flaml.automl: 02-28 21:31:13] {2620} INFO - at 175.0s,\testimator sarimax's best error=0.0007,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:31:13] {2458} INFO - iteration 470, current learner xgboost\n",
- "[flaml.automl: 02-28 21:31:13] {2620} INFO - at 175.0s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:31:13] {2458} INFO - iteration 471, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:31:13] {2620} INFO - at 175.1s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:31:13] {2458} INFO - iteration 472, current learner sarimax\n",
- "[flaml.automl: 02-28 21:31:13] {2620} INFO - at 175.5s,\testimator sarimax's best error=0.0007,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:31:13] {2458} INFO - iteration 473, current learner prophet\n",
- "[flaml.automl: 02-28 21:31:16] {2620} INFO - at 178.3s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:31:16] {2458} INFO - iteration 474, current learner sarimax\n",
- "[flaml.automl: 02-28 21:31:16] {2620} INFO - at 178.8s,\testimator sarimax's best error=0.0007,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:31:16] {2458} INFO - iteration 475, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:31:16] {2620} INFO - at 178.8s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:31:16] {2458} INFO - iteration 476, current learner sarimax\n",
- "[flaml.automl: 02-28 21:31:19] {2620} INFO - at 181.4s,\testimator sarimax's best error=0.0007,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:31:19] {2458} INFO - iteration 477, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:31:19] {2620} INFO - at 181.5s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:31:19] {2458} INFO - iteration 478, current learner sarimax\n",
- "[flaml.automl: 02-28 21:31:21] {2620} INFO - at 183.3s,\testimator sarimax's best error=0.0007,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:31:21] {2458} INFO - iteration 479, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:31:21] {2620} INFO - at 183.3s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:31:21] {2458} INFO - iteration 480, current learner prophet\n",
- "[flaml.automl: 02-28 21:31:24] {2620} INFO - at 186.3s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:31:24] {2458} INFO - iteration 481, current learner sarimax\n",
- "[flaml.automl: 02-28 21:31:25] {2620} INFO - at 186.9s,\testimator sarimax's best error=0.0007,\tbest estimator prophet's best error=0.0005\n",
- "[flaml.automl: 02-28 21:31:25] {2458} INFO - iteration 482, current learner sarimax\n",
- "[flaml.automl: 02-28 21:31:27] {2620} INFO - at 189.0s,\testimator sarimax's best error=0.0004,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:31:27] {2458} INFO - iteration 483, current learner rf\n",
- "[flaml.automl: 02-28 21:31:27] {2620} INFO - at 189.0s,\testimator rf's best error=0.0016,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:31:27] {2458} INFO - iteration 484, current learner sarimax\n",
- "[flaml.automl: 02-28 21:31:29] {2620} INFO - at 191.0s,\testimator sarimax's best error=0.0004,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:31:29] {2458} INFO - iteration 485, current learner sarimax\n",
- "[flaml.automl: 02-28 21:31:30] {2620} INFO - at 192.2s,\testimator sarimax's best error=0.0004,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:31:30] {2458} INFO - iteration 486, current learner rf\n",
- "[flaml.automl: 02-28 21:31:30] {2620} INFO - at 192.2s,\testimator rf's best error=0.0016,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:31:30] {2458} INFO - iteration 487, current learner sarimax\n",
- "[flaml.automl: 02-28 21:31:33] {2620} INFO - at 194.9s,\testimator sarimax's best error=0.0004,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:31:33] {2458} INFO - iteration 488, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:31:33] {2620} INFO - at 195.0s,\testimator extra_tree's best error=0.0016,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:31:33] {2458} INFO - iteration 489, current learner sarimax\n",
- "[flaml.automl: 02-28 21:31:35] {2620} INFO - at 197.6s,\testimator sarimax's best error=0.0004,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:31:35] {2458} INFO - iteration 490, current learner sarimax\n",
- "[flaml.automl: 02-28 21:31:37] {2620} INFO - at 199.1s,\testimator sarimax's best error=0.0004,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:31:37] {2458} INFO - iteration 491, current learner sarimax\n",
- "[flaml.automl: 02-28 21:31:40] {2620} INFO - at 202.8s,\testimator sarimax's best error=0.0004,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:31:40] {2458} INFO - iteration 492, current learner sarimax\n",
- "[flaml.automl: 02-28 21:31:43] {2620} INFO - at 204.9s,\testimator sarimax's best error=0.0004,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:31:43] {2458} INFO - iteration 493, current learner sarimax\n",
- "[flaml.automl: 02-28 21:31:44] {2620} INFO - at 206.7s,\testimator sarimax's best error=0.0004,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:31:44] {2458} INFO - iteration 494, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:31:44] {2620} INFO - at 206.8s,\testimator extra_tree's best error=0.0016,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:31:44] {2458} INFO - iteration 495, current learner lgbm\n",
- "[flaml.automl: 02-28 21:31:44] {2620} INFO - at 206.8s,\testimator lgbm's best error=0.0022,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:31:44] {2458} INFO - iteration 496, current learner sarimax\n",
- "[flaml.automl: 02-28 21:31:45] {2620} INFO - at 207.6s,\testimator sarimax's best error=0.0004,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:31:45] {2458} INFO - iteration 497, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:31:45] {2620} INFO - at 207.7s,\testimator extra_tree's best error=0.0016,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:31:45] {2458} INFO - iteration 498, current learner sarimax\n",
- "[flaml.automl: 02-28 21:31:47] {2620} INFO - at 209.8s,\testimator sarimax's best error=0.0004,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:31:47] {2458} INFO - iteration 499, current learner sarimax\n",
- "[flaml.automl: 02-28 21:31:49] {2620} INFO - at 211.7s,\testimator sarimax's best error=0.0004,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:31:49] {2458} INFO - iteration 500, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:31:49] {2620} INFO - at 211.8s,\testimator extra_tree's best error=0.0016,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:31:49] {2458} INFO - iteration 501, current learner xgboost\n",
- "[flaml.automl: 02-28 21:31:49] {2620} INFO - at 211.8s,\testimator xgboost's best error=0.0024,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:31:49] {2458} INFO - iteration 502, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:31:50] {2620} INFO - at 211.9s,\testimator extra_tree's best error=0.0016,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:31:50] {2458} INFO - iteration 503, current learner sarimax\n",
- "[flaml.automl: 02-28 21:31:52] {2620} INFO - at 213.9s,\testimator sarimax's best error=0.0004,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:31:52] {2458} INFO - iteration 504, current learner sarimax\n",
- "[flaml.automl: 02-28 21:31:53] {2620} INFO - at 215.7s,\testimator sarimax's best error=0.0004,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:31:53] {2458} INFO - iteration 505, current learner sarimax\n",
- "[flaml.automl: 02-28 21:31:54] {2620} INFO - at 216.6s,\testimator sarimax's best error=0.0004,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:31:54] {2458} INFO - iteration 506, current learner sarimax\n",
- "[flaml.automl: 02-28 21:31:56] {2620} INFO - at 218.2s,\testimator sarimax's best error=0.0004,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:31:56] {2458} INFO - iteration 507, current learner xgboost\n",
- "[flaml.automl: 02-28 21:31:56] {2620} INFO - at 218.3s,\testimator xgboost's best error=0.0024,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:31:56] {2458} INFO - iteration 508, current learner rf\n",
- "[flaml.automl: 02-28 21:31:56] {2620} INFO - at 218.4s,\testimator rf's best error=0.0016,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:31:56] {2458} INFO - iteration 509, current learner xgboost\n",
- "[flaml.automl: 02-28 21:31:56] {2620} INFO - at 218.5s,\testimator xgboost's best error=0.0023,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:31:56] {2458} INFO - iteration 510, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:31:56] {2620} INFO - at 218.5s,\testimator extra_tree's best error=0.0016,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:31:56] {2458} INFO - iteration 511, current learner rf\n",
- "[flaml.automl: 02-28 21:31:56] {2620} INFO - at 218.6s,\testimator rf's best error=0.0016,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:31:56] {2458} INFO - iteration 512, current learner sarimax\n",
- "[flaml.automl: 02-28 21:31:59] {2620} INFO - at 220.9s,\testimator sarimax's best error=0.0004,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:31:59] {2458} INFO - iteration 513, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:31:59] {2620} INFO - at 220.9s,\testimator extra_tree's best error=0.0016,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:31:59] {2458} INFO - iteration 514, current learner sarimax\n",
- "[flaml.automl: 02-28 21:32:00] {2620} INFO - at 222.8s,\testimator sarimax's best error=0.0004,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:00] {2458} INFO - iteration 515, current learner rf\n",
- "[flaml.automl: 02-28 21:32:00] {2620} INFO - at 222.8s,\testimator rf's best error=0.0016,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:00] {2458} INFO - iteration 516, current learner sarimax\n",
- "[flaml.automl: 02-28 21:32:02] {2620} INFO - at 224.8s,\testimator sarimax's best error=0.0004,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:02] {2458} INFO - iteration 517, current learner rf\n",
- "[flaml.automl: 02-28 21:32:02] {2620} INFO - at 224.8s,\testimator rf's best error=0.0016,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:02] {2458} INFO - iteration 518, current learner sarimax\n",
- "[flaml.automl: 02-28 21:32:05] {2620} INFO - at 227.1s,\testimator sarimax's best error=0.0004,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:05] {2458} INFO - iteration 519, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:32:05] {2620} INFO - at 227.1s,\testimator extra_tree's best error=0.0016,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:05] {2458} INFO - iteration 520, current learner rf\n",
- "[flaml.automl: 02-28 21:32:05] {2620} INFO - at 227.2s,\testimator rf's best error=0.0016,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:05] {2458} INFO - iteration 521, current learner rf\n",
- "[flaml.automl: 02-28 21:32:05] {2620} INFO - at 227.2s,\testimator rf's best error=0.0016,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:05] {2458} INFO - iteration 522, current learner sarimax\n",
- "[flaml.automl: 02-28 21:32:07] {2620} INFO - at 229.1s,\testimator sarimax's best error=0.0004,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:07] {2458} INFO - iteration 523, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:32:07] {2620} INFO - at 229.1s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:07] {2458} INFO - iteration 524, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:32:07] {2620} INFO - at 229.1s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:07] {2458} INFO - iteration 525, current learner rf\n",
- "[flaml.automl: 02-28 21:32:07] {2620} INFO - at 229.2s,\testimator rf's best error=0.0016,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:07] {2458} INFO - iteration 526, current learner arima\n",
- "[flaml.automl: 02-28 21:32:07] {2620} INFO - at 229.3s,\testimator arima's best error=0.0033,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:07] {2458} INFO - iteration 527, current learner sarimax\n",
- "[flaml.automl: 02-28 21:32:09] {2620} INFO - at 231.5s,\testimator sarimax's best error=0.0004,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:09] {2458} INFO - iteration 528, current learner sarimax\n",
- "[flaml.automl: 02-28 21:32:12] {2620} INFO - at 233.9s,\testimator sarimax's best error=0.0004,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:12] {2458} INFO - iteration 529, current learner arima\n",
- "[flaml.automl: 02-28 21:32:12] {2620} INFO - at 234.3s,\testimator arima's best error=0.0033,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:12] {2458} INFO - iteration 530, current learner sarimax\n",
- "[flaml.automl: 02-28 21:32:13] {2620} INFO - at 235.3s,\testimator sarimax's best error=0.0004,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:13] {2458} INFO - iteration 531, current learner sarimax\n",
- "[flaml.automl: 02-28 21:32:15] {2620} INFO - at 237.7s,\testimator sarimax's best error=0.0004,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:15] {2458} INFO - iteration 532, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:32:15] {2620} INFO - at 237.7s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:15] {2458} INFO - iteration 533, current learner rf\n",
- "[flaml.automl: 02-28 21:32:15] {2620} INFO - at 237.8s,\testimator rf's best error=0.0016,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:15] {2458} INFO - iteration 534, current learner rf\n",
- "[flaml.automl: 02-28 21:32:15] {2620} INFO - at 237.8s,\testimator rf's best error=0.0016,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:15] {2458} INFO - iteration 535, current learner rf\n",
- "[flaml.automl: 02-28 21:32:16] {2620} INFO - at 237.9s,\testimator rf's best error=0.0016,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:16] {2458} INFO - iteration 536, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:32:16] {2620} INFO - at 237.9s,\testimator extra_tree's best error=0.0016,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:16] {2458} INFO - iteration 537, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:32:16] {2620} INFO - at 237.9s,\testimator extra_tree's best error=0.0016,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:16] {2458} INFO - iteration 538, current learner arima\n",
- "[flaml.automl: 02-28 21:32:16] {2620} INFO - at 238.0s,\testimator arima's best error=0.0033,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:16] {2458} INFO - iteration 539, current learner rf\n",
- "[flaml.automl: 02-28 21:32:16] {2620} INFO - at 238.0s,\testimator rf's best error=0.0016,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:16] {2458} INFO - iteration 540, current learner arima\n",
- "[flaml.automl: 02-28 21:32:16] {2620} INFO - at 238.3s,\testimator arima's best error=0.0033,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:16] {2458} INFO - iteration 541, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:32:16] {2620} INFO - at 238.3s,\testimator extra_tree's best error=0.0016,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:16] {2458} INFO - iteration 542, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:32:16] {2620} INFO - at 238.4s,\testimator extra_tree's best error=0.0016,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:16] {2458} INFO - iteration 543, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:32:16] {2620} INFO - at 238.4s,\testimator extra_tree's best error=0.0016,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:16] {2458} INFO - iteration 544, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:32:16] {2620} INFO - at 238.4s,\testimator extra_tree's best error=0.0016,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:16] {2458} INFO - iteration 545, current learner xgboost\n",
- "[flaml.automl: 02-28 21:32:16] {2620} INFO - at 238.6s,\testimator xgboost's best error=0.0023,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:16] {2458} INFO - iteration 546, current learner lgbm\n",
- "[flaml.automl: 02-28 21:32:16] {2620} INFO - at 238.6s,\testimator lgbm's best error=0.0022,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:16] {2458} INFO - iteration 547, current learner rf\n",
- "[flaml.automl: 02-28 21:32:16] {2620} INFO - at 238.7s,\testimator rf's best error=0.0016,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:16] {2458} INFO - iteration 548, current learner xgboost\n",
- "[flaml.automl: 02-28 21:32:16] {2620} INFO - at 238.8s,\testimator xgboost's best error=0.0023,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:16] {2458} INFO - iteration 549, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:32:16] {2620} INFO - at 238.8s,\testimator extra_tree's best error=0.0016,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:16] {2458} INFO - iteration 550, current learner xgboost\n",
- "[flaml.automl: 02-28 21:32:17] {2620} INFO - at 239.1s,\testimator xgboost's best error=0.0023,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:17] {2458} INFO - iteration 551, current learner xgboost\n",
- "[flaml.automl: 02-28 21:32:17] {2620} INFO - at 239.2s,\testimator xgboost's best error=0.0023,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:17] {2458} INFO - iteration 552, current learner rf\n",
- "[flaml.automl: 02-28 21:32:17] {2620} INFO - at 239.2s,\testimator rf's best error=0.0016,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:17] {2458} INFO - iteration 553, current learner lgbm\n",
- "[flaml.automl: 02-28 21:32:17] {2620} INFO - at 239.2s,\testimator lgbm's best error=0.0022,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:17] {2458} INFO - iteration 554, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:32:17] {2620} INFO - at 239.3s,\testimator extra_tree's best error=0.0016,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:17] {2458} INFO - iteration 555, current learner rf\n",
- "[flaml.automl: 02-28 21:32:17] {2620} INFO - at 239.3s,\testimator rf's best error=0.0016,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:17] {2458} INFO - iteration 556, current learner arima\n",
- "[flaml.automl: 02-28 21:32:18] {2620} INFO - at 240.0s,\testimator arima's best error=0.0033,\tbest estimator sarimax's best error=0.0004\n",
- "[flaml.automl: 02-28 21:32:18] {2850} INFO - retrain sarimax for 0.7s\n",
- "[flaml.automl: 02-28 21:32:18] {2857} INFO - retrained model: \n",
- "[flaml.automl: 02-28 21:32:18] {2234} INFO - fit succeeded\n",
- "[flaml.automl: 02-28 21:32:18] {2235} INFO - Time taken to find the best model: 188.97322726249695\n",
- "[flaml.automl: 02-28 21:32:18] {2246} WARNING - Time taken to find the best model is 79% of the provided time budget and not all estimators' hyperparameter search converged. Consider increasing the time budget.\n"
+ "[flaml.automl: 07-28 21:10:44] {2478} INFO - task = ts_forecast\n",
+ "[flaml.automl: 07-28 21:10:44] {2480} INFO - Data split method: time\n",
+ "[flaml.automl: 07-28 21:10:44] {2483} INFO - Evaluation method: holdout\n",
+ "[flaml.automl: 07-28 21:10:44] {2552} INFO - Minimizing error metric: mape\n",
+ "[flaml.automl: 07-28 21:10:44] {2694} INFO - List of ML learners in AutoML Run: ['lgbm', 'rf', 'xgboost', 'extra_tree', 'xgb_limitdepth', 'prophet', 'arima', 'sarimax']\n",
+ "[flaml.automl: 07-28 21:10:44] {2986} INFO - iteration 0, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:10:44] {3114} INFO - Estimated sufficient time budget=2005s. Estimated necessary time budget=2s.\n",
+ "[flaml.automl: 07-28 21:10:44] {3161} INFO - at 0.7s,\testimator lgbm's best error=0.0621,\tbest estimator lgbm's best error=0.0621\n",
+ "[flaml.automl: 07-28 21:10:44] {2986} INFO - iteration 1, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:10:44] {3161} INFO - at 0.7s,\testimator lgbm's best error=0.0621,\tbest estimator lgbm's best error=0.0621\n",
+ "[flaml.automl: 07-28 21:10:44] {2986} INFO - iteration 2, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:10:44] {3161} INFO - at 0.8s,\testimator lgbm's best error=0.0277,\tbest estimator lgbm's best error=0.0277\n",
+ "[flaml.automl: 07-28 21:10:44] {2986} INFO - iteration 3, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:10:44] {3161} INFO - at 0.8s,\testimator lgbm's best error=0.0277,\tbest estimator lgbm's best error=0.0277\n",
+ "[flaml.automl: 07-28 21:10:44] {2986} INFO - iteration 4, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:10:44] {3161} INFO - at 0.9s,\testimator lgbm's best error=0.0175,\tbest estimator lgbm's best error=0.0175\n",
+ "[flaml.automl: 07-28 21:10:44] {2986} INFO - iteration 5, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:10:44] {3161} INFO - at 0.9s,\testimator lgbm's best error=0.0055,\tbest estimator lgbm's best error=0.0055\n",
+ "[flaml.automl: 07-28 21:10:44] {2986} INFO - iteration 6, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:10:44] {3161} INFO - at 1.0s,\testimator lgbm's best error=0.0055,\tbest estimator lgbm's best error=0.0055\n",
+ "[flaml.automl: 07-28 21:10:44] {2986} INFO - iteration 7, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:10:45] {3161} INFO - at 1.0s,\testimator lgbm's best error=0.0055,\tbest estimator lgbm's best error=0.0055\n",
+ "[flaml.automl: 07-28 21:10:45] {2986} INFO - iteration 8, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:10:45] {3161} INFO - at 1.0s,\testimator lgbm's best error=0.0031,\tbest estimator lgbm's best error=0.0031\n",
+ "[flaml.automl: 07-28 21:10:45] {2986} INFO - iteration 9, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:10:45] {3161} INFO - at 1.1s,\testimator lgbm's best error=0.0031,\tbest estimator lgbm's best error=0.0031\n",
+ "[flaml.automl: 07-28 21:10:45] {2986} INFO - iteration 10, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:10:45] {3161} INFO - at 1.1s,\testimator lgbm's best error=0.0027,\tbest estimator lgbm's best error=0.0027\n",
+ "[flaml.automl: 07-28 21:10:45] {2986} INFO - iteration 11, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:10:45] {3161} INFO - at 1.2s,\testimator lgbm's best error=0.0022,\tbest estimator lgbm's best error=0.0022\n",
+ "[flaml.automl: 07-28 21:10:45] {2986} INFO - iteration 12, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:10:45] {3161} INFO - at 1.2s,\testimator lgbm's best error=0.0022,\tbest estimator lgbm's best error=0.0022\n",
+ "[flaml.automl: 07-28 21:10:45] {2986} INFO - iteration 13, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:10:45] {3161} INFO - at 1.3s,\testimator lgbm's best error=0.0022,\tbest estimator lgbm's best error=0.0022\n",
+ "[flaml.automl: 07-28 21:10:45] {2986} INFO - iteration 14, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:10:45] {3161} INFO - at 1.3s,\testimator lgbm's best error=0.0022,\tbest estimator lgbm's best error=0.0022\n",
+ "[flaml.automl: 07-28 21:10:45] {2986} INFO - iteration 15, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:10:45] {3161} INFO - at 1.4s,\testimator lgbm's best error=0.0022,\tbest estimator lgbm's best error=0.0022\n",
+ "[flaml.automl: 07-28 21:10:45] {2986} INFO - iteration 16, current learner rf\n",
+ "[flaml.automl: 07-28 21:10:45] {3161} INFO - at 1.6s,\testimator rf's best error=0.0217,\tbest estimator lgbm's best error=0.0022\n",
+ "[flaml.automl: 07-28 21:10:45] {2986} INFO - iteration 17, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:10:46] {3161} INFO - at 2.0s,\testimator xgboost's best error=0.6738,\tbest estimator lgbm's best error=0.0022\n",
+ "[flaml.automl: 07-28 21:10:46] {2986} INFO - iteration 18, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:10:46] {3161} INFO - at 2.1s,\testimator extra_tree's best error=0.0197,\tbest estimator lgbm's best error=0.0022\n",
+ "[flaml.automl: 07-28 21:10:46] {2986} INFO - iteration 19, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:10:46] {3161} INFO - at 2.2s,\testimator extra_tree's best error=0.0177,\tbest estimator lgbm's best error=0.0022\n",
+ "[flaml.automl: 07-28 21:10:46] {2986} INFO - iteration 20, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:10:46] {3161} INFO - at 2.2s,\testimator xgb_limitdepth's best error=0.0447,\tbest estimator lgbm's best error=0.0022\n",
+ "[flaml.automl: 07-28 21:10:46] {2986} INFO - iteration 21, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:10:46] {3161} INFO - at 2.2s,\testimator xgb_limitdepth's best error=0.0447,\tbest estimator lgbm's best error=0.0022\n",
+ "[flaml.automl: 07-28 21:10:46] {2986} INFO - iteration 22, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:10:46] {3161} INFO - at 2.3s,\testimator xgb_limitdepth's best error=0.0029,\tbest estimator lgbm's best error=0.0022\n",
+ "[flaml.automl: 07-28 21:10:46] {2986} INFO - iteration 23, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:10:46] {3161} INFO - at 2.4s,\testimator lgbm's best error=0.0022,\tbest estimator lgbm's best error=0.0022\n",
+ "[flaml.automl: 07-28 21:10:46] {2986} INFO - iteration 24, current learner rf\n",
+ "[flaml.automl: 07-28 21:10:46] {3161} INFO - at 2.4s,\testimator rf's best error=0.0217,\tbest estimator lgbm's best error=0.0022\n",
+ "[flaml.automl: 07-28 21:10:46] {2986} INFO - iteration 25, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:10:46] {3161} INFO - at 2.5s,\testimator xgb_limitdepth's best error=0.0029,\tbest estimator lgbm's best error=0.0022\n",
+ "[flaml.automl: 07-28 21:10:46] {2986} INFO - iteration 26, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:10:46] {3161} INFO - at 2.6s,\testimator xgb_limitdepth's best error=0.0019,\tbest estimator xgb_limitdepth's best error=0.0019\n",
+ "[flaml.automl: 07-28 21:10:46] {2986} INFO - iteration 27, current learner rf\n",
+ "[flaml.automl: 07-28 21:10:46] {3161} INFO - at 2.7s,\testimator rf's best error=0.0216,\tbest estimator xgb_limitdepth's best error=0.0019\n",
+ "[flaml.automl: 07-28 21:10:46] {2986} INFO - iteration 28, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:10:46] {3161} INFO - at 2.8s,\testimator xgb_limitdepth's best error=0.0019,\tbest estimator xgb_limitdepth's best error=0.0019\n",
+ "[flaml.automl: 07-28 21:10:46] {2986} INFO - iteration 29, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:10:46] {3161} INFO - at 2.9s,\testimator xgb_limitdepth's best error=0.0019,\tbest estimator xgb_limitdepth's best error=0.0019\n",
+ "[flaml.automl: 07-28 21:10:46] {2986} INFO - iteration 30, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:10:46] {3161} INFO - at 2.9s,\testimator xgb_limitdepth's best error=0.0019,\tbest estimator xgb_limitdepth's best error=0.0019\n",
+ "[flaml.automl: 07-28 21:10:46] {2986} INFO - iteration 31, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:10:47] {3161} INFO - at 3.0s,\testimator lgbm's best error=0.0022,\tbest estimator xgb_limitdepth's best error=0.0019\n",
+ "[flaml.automl: 07-28 21:10:47] {2986} INFO - iteration 32, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:10:47] {3161} INFO - at 3.1s,\testimator lgbm's best error=0.0022,\tbest estimator xgb_limitdepth's best error=0.0019\n",
+ "[flaml.automl: 07-28 21:10:47] {2986} INFO - iteration 33, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:10:47] {3161} INFO - at 3.2s,\testimator lgbm's best error=0.0022,\tbest estimator xgb_limitdepth's best error=0.0019\n",
+ "[flaml.automl: 07-28 21:10:47] {2986} INFO - iteration 34, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:10:47] {3161} INFO - at 3.3s,\testimator xgb_limitdepth's best error=0.0019,\tbest estimator xgb_limitdepth's best error=0.0019\n",
+ "[flaml.automl: 07-28 21:10:47] {2986} INFO - iteration 35, current learner prophet\n",
+ "[flaml.automl: 07-28 21:11:07] {3161} INFO - at 23.3s,\testimator prophet's best error=0.0008,\tbest estimator prophet's best error=0.0008\n",
+ "[flaml.automl: 07-28 21:11:07] {2986} INFO - iteration 36, current learner arima\n",
+ "[flaml.automl: 07-28 21:11:08] {3161} INFO - at 24.2s,\testimator arima's best error=0.0047,\tbest estimator prophet's best error=0.0008\n",
+ "[flaml.automl: 07-28 21:11:08] {2986} INFO - iteration 37, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:11:09] {3161} INFO - at 25.3s,\testimator sarimax's best error=0.0047,\tbest estimator prophet's best error=0.0008\n",
+ "[flaml.automl: 07-28 21:11:09] {2986} INFO - iteration 38, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:11:09] {3161} INFO - at 25.4s,\testimator xgboost's best error=0.6738,\tbest estimator prophet's best error=0.0008\n",
+ "[flaml.automl: 07-28 21:11:09] {2986} INFO - iteration 39, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:11:10] {3161} INFO - at 26.4s,\testimator extra_tree's best error=0.0177,\tbest estimator prophet's best error=0.0008\n",
+ "[flaml.automl: 07-28 21:11:10] {2986} INFO - iteration 40, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:11:10] {3161} INFO - at 26.6s,\testimator sarimax's best error=0.0047,\tbest estimator prophet's best error=0.0008\n",
+ "[flaml.automl: 07-28 21:11:10] {2986} INFO - iteration 41, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:11:10] {3161} INFO - at 26.7s,\testimator xgb_limitdepth's best error=0.0019,\tbest estimator prophet's best error=0.0008\n",
+ "[flaml.automl: 07-28 21:11:10] {2986} INFO - iteration 42, current learner arima\n",
+ "[flaml.automl: 07-28 21:11:10] {3161} INFO - at 26.9s,\testimator arima's best error=0.0047,\tbest estimator prophet's best error=0.0008\n",
+ "[flaml.automl: 07-28 21:11:10] {2986} INFO - iteration 43, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:11:10] {3161} INFO - at 26.9s,\testimator xgboost's best error=0.1712,\tbest estimator prophet's best error=0.0008\n",
+ "[flaml.automl: 07-28 21:11:10] {2986} INFO - iteration 44, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:11:11] {3161} INFO - at 27.0s,\testimator xgboost's best error=0.0257,\tbest estimator prophet's best error=0.0008\n",
+ "[flaml.automl: 07-28 21:11:11] {2986} INFO - iteration 45, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:11:11] {3161} INFO - at 27.0s,\testimator xgboost's best error=0.0257,\tbest estimator prophet's best error=0.0008\n",
+ "[flaml.automl: 07-28 21:11:11] {2986} INFO - iteration 46, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:11:11] {3161} INFO - at 27.1s,\testimator xgboost's best error=0.0242,\tbest estimator prophet's best error=0.0008\n",
+ "[flaml.automl: 07-28 21:11:11] {2986} INFO - iteration 47, current learner arima\n",
+ "[flaml.automl: 07-28 21:11:11] {3161} INFO - at 28.0s,\testimator arima's best error=0.0047,\tbest estimator prophet's best error=0.0008\n",
+ "[flaml.automl: 07-28 21:11:11] {2986} INFO - iteration 48, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:11:12] {3161} INFO - at 28.9s,\testimator sarimax's best error=0.0047,\tbest estimator prophet's best error=0.0008\n",
+ "[flaml.automl: 07-28 21:11:12] {2986} INFO - iteration 49, current learner prophet\n",
+ "[flaml.automl: 07-28 21:11:17] {3161} INFO - at 33.3s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:17] {2986} INFO - iteration 50, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:11:17] {3161} INFO - at 33.3s,\testimator xgboost's best error=0.0242,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:17] {2986} INFO - iteration 51, current learner arima\n",
+ "[flaml.automl: 07-28 21:11:17] {3161} INFO - at 33.5s,\testimator arima's best error=0.0044,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:17] {2986} INFO - iteration 52, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:11:17] {3161} INFO - at 33.5s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:17] {2986} INFO - iteration 53, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:11:17] {3161} INFO - at 33.6s,\testimator xgb_limitdepth's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:17] {2986} INFO - iteration 54, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:11:18] {3161} INFO - at 34.4s,\testimator sarimax's best error=0.0047,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:18] {2986} INFO - iteration 55, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:11:18] {3161} INFO - at 34.5s,\testimator xgboost's best error=0.0242,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:18] {2986} INFO - iteration 56, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:11:18] {3161} INFO - at 34.5s,\testimator xgboost's best error=0.0191,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:18] {2986} INFO - iteration 57, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:11:18] {3161} INFO - at 34.6s,\testimator xgboost's best error=0.0191,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:18] {2986} INFO - iteration 58, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:11:18] {3161} INFO - at 34.6s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:18] {2986} INFO - iteration 59, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:11:18] {3161} INFO - at 34.6s,\testimator xgb_limitdepth's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:18] {2986} INFO - iteration 60, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:11:18] {3161} INFO - at 34.7s,\testimator xgboost's best error=0.0103,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:18] {2986} INFO - iteration 61, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:11:18] {3161} INFO - at 34.7s,\testimator xgboost's best error=0.0081,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:18] {2986} INFO - iteration 62, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:11:18] {3161} INFO - at 34.8s,\testimator xgboost's best error=0.0081,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:18] {2986} INFO - iteration 63, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:11:18] {3161} INFO - at 34.8s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:18] {2986} INFO - iteration 64, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:11:18] {3161} INFO - at 34.8s,\testimator xgboost's best error=0.0081,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:18] {2986} INFO - iteration 65, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:11:18] {3161} INFO - at 34.9s,\testimator xgboost's best error=0.0041,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:18] {2986} INFO - iteration 66, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:11:18] {3161} INFO - at 35.0s,\testimator xgboost's best error=0.0041,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:19] {2986} INFO - iteration 67, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:11:19] {3161} INFO - at 35.1s,\testimator xgboost's best error=0.0029,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:19] {2986} INFO - iteration 68, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:11:19] {3161} INFO - at 35.2s,\testimator xgboost's best error=0.0029,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:19] {2986} INFO - iteration 69, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:11:19] {3161} INFO - at 35.3s,\testimator xgboost's best error=0.0028,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:19] {2986} INFO - iteration 70, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:11:19] {3161} INFO - at 35.3s,\testimator xgb_limitdepth's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:19] {2986} INFO - iteration 71, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:11:19] {3161} INFO - at 35.3s,\testimator xgb_limitdepth's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:19] {2986} INFO - iteration 72, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:11:19] {3161} INFO - at 35.4s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:19] {2986} INFO - iteration 73, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:11:19] {3161} INFO - at 35.4s,\testimator xgb_limitdepth's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:19] {2986} INFO - iteration 74, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:11:19] {3161} INFO - at 35.4s,\testimator xgb_limitdepth's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:19] {2986} INFO - iteration 75, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:11:19] {3161} INFO - at 35.5s,\testimator xgb_limitdepth's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:19] {2986} INFO - iteration 76, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:11:19] {3161} INFO - at 35.6s,\testimator sarimax's best error=0.0047,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:19] {2986} INFO - iteration 77, current learner prophet\n",
+ "[flaml.automl: 07-28 21:11:24] {3161} INFO - at 40.9s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:24] {2986} INFO - iteration 78, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:11:25] {3161} INFO - at 41.3s,\testimator sarimax's best error=0.0041,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:25] {2986} INFO - iteration 79, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:11:25] {3161} INFO - at 41.3s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:25] {2986} INFO - iteration 80, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:11:25] {3161} INFO - at 41.4s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:25] {2986} INFO - iteration 81, current learner prophet\n",
+ "[flaml.automl: 07-28 21:11:30] {3161} INFO - at 46.9s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:30] {2986} INFO - iteration 82, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:11:31] {3161} INFO - at 47.1s,\testimator xgboost's best error=0.0027,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:31] {2986} INFO - iteration 83, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:11:31] {3161} INFO - at 47.1s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:31] {2986} INFO - iteration 84, current learner arima\n",
+ "[flaml.automl: 07-28 21:11:31] {3161} INFO - at 47.6s,\testimator arima's best error=0.0044,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:31] {2986} INFO - iteration 85, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:11:31] {3161} INFO - at 47.7s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:31] {2986} INFO - iteration 86, current learner prophet\n",
+ "[flaml.automl: 07-28 21:11:35] {3161} INFO - at 51.8s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:35] {2986} INFO - iteration 87, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:11:35] {3161} INFO - at 51.8s,\testimator xgb_limitdepth's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:35] {2986} INFO - iteration 88, current learner prophet\n",
+ "[flaml.automl: 07-28 21:11:38] {3161} INFO - at 54.9s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:38] {2986} INFO - iteration 89, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:11:38] {3161} INFO - at 55.0s,\testimator extra_tree's best error=0.0177,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:38] {2986} INFO - iteration 90, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:11:39] {3161} INFO - at 55.0s,\testimator xgb_limitdepth's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:39] {2986} INFO - iteration 91, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:11:39] {3161} INFO - at 55.0s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:39] {2986} INFO - iteration 92, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:11:39] {3161} INFO - at 55.1s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:39] {2986} INFO - iteration 93, current learner arima\n",
+ "[flaml.automl: 07-28 21:11:39] {3161} INFO - at 55.3s,\testimator arima's best error=0.0043,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:39] {2986} INFO - iteration 94, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:11:39] {3161} INFO - at 55.3s,\testimator xgboost's best error=0.0027,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:39] {2986} INFO - iteration 95, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:11:39] {3161} INFO - at 55.5s,\testimator sarimax's best error=0.0040,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:39] {2986} INFO - iteration 96, current learner arima\n",
+ "[flaml.automl: 07-28 21:11:40] {3161} INFO - at 56.3s,\testimator arima's best error=0.0033,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:40] {2986} INFO - iteration 97, current learner arima\n",
+ "[flaml.automl: 07-28 21:11:41] {3161} INFO - at 57.4s,\testimator arima's best error=0.0033,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:41] {2986} INFO - iteration 98, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:11:41] {3161} INFO - at 57.4s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:41] {2986} INFO - iteration 99, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:11:41] {3161} INFO - at 57.8s,\testimator sarimax's best error=0.0038,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:41] {2986} INFO - iteration 100, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:11:41] {3161} INFO - at 57.8s,\testimator extra_tree's best error=0.0089,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:41] {2986} INFO - iteration 101, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:11:41] {3161} INFO - at 57.8s,\testimator extra_tree's best error=0.0089,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:41] {2986} INFO - iteration 102, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:11:41] {3161} INFO - at 57.9s,\testimator extra_tree's best error=0.0089,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:41] {2986} INFO - iteration 103, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:11:42] {3161} INFO - at 58.0s,\testimator xgboost's best error=0.0026,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:42] {2986} INFO - iteration 104, current learner arima\n",
+ "[flaml.automl: 07-28 21:11:42] {3161} INFO - at 58.3s,\testimator arima's best error=0.0033,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:42] {2986} INFO - iteration 105, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:11:42] {3161} INFO - at 58.4s,\testimator extra_tree's best error=0.0089,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:42] {2986} INFO - iteration 106, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:11:42] {3161} INFO - at 58.4s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:42] {2986} INFO - iteration 107, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:11:42] {3161} INFO - at 58.5s,\testimator extra_tree's best error=0.0089,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:42] {2986} INFO - iteration 108, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:11:42] {3161} INFO - at 58.6s,\testimator xgboost's best error=0.0026,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:42] {2986} INFO - iteration 109, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:11:42] {3161} INFO - at 58.6s,\testimator extra_tree's best error=0.0089,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:42] {2986} INFO - iteration 110, current learner arima\n",
+ "[flaml.automl: 07-28 21:11:43] {3161} INFO - at 59.4s,\testimator arima's best error=0.0033,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:43] {2986} INFO - iteration 111, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:11:43] {3161} INFO - at 59.4s,\testimator extra_tree's best error=0.0089,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:43] {2986} INFO - iteration 112, current learner prophet\n",
+ "[flaml.automl: 07-28 21:11:47] {3161} INFO - at 63.3s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:47] {2986} INFO - iteration 113, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:11:47] {3161} INFO - at 63.4s,\testimator extra_tree's best error=0.0074,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:47] {2986} INFO - iteration 114, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:11:47] {3161} INFO - at 63.4s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:47] {2986} INFO - iteration 115, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:11:48] {3161} INFO - at 64.6s,\testimator sarimax's best error=0.0038,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:48] {2986} INFO - iteration 116, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:11:48] {3161} INFO - at 64.6s,\testimator extra_tree's best error=0.0074,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:48] {2986} INFO - iteration 117, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:11:48] {3161} INFO - at 64.8s,\testimator sarimax's best error=0.0038,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:48] {2986} INFO - iteration 118, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:11:48] {3161} INFO - at 64.8s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:48] {2986} INFO - iteration 119, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:11:48] {3161} INFO - at 64.8s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:48] {2986} INFO - iteration 120, current learner prophet\n",
+ "[flaml.automl: 07-28 21:11:52] {3161} INFO - at 68.2s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:52] {2986} INFO - iteration 121, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:11:52] {3161} INFO - at 68.2s,\testimator extra_tree's best error=0.0074,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:52] {2986} INFO - iteration 122, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:11:52] {3161} INFO - at 68.2s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:52] {2986} INFO - iteration 123, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:11:52] {3161} INFO - at 68.3s,\testimator extra_tree's best error=0.0074,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:52] {2986} INFO - iteration 124, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:11:52] {3161} INFO - at 68.4s,\testimator extra_tree's best error=0.0074,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:52] {2986} INFO - iteration 125, current learner prophet\n",
+ "[flaml.automl: 07-28 21:11:55] {3161} INFO - at 71.3s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:55] {2986} INFO - iteration 126, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:11:55] {3161} INFO - at 71.3s,\testimator extra_tree's best error=0.0055,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:55] {2986} INFO - iteration 127, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:11:55] {3161} INFO - at 71.4s,\testimator xgboost's best error=0.0026,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:55] {2986} INFO - iteration 128, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:11:55] {3161} INFO - at 71.4s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:55] {2986} INFO - iteration 129, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:11:55] {3161} INFO - at 71.5s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:55] {2986} INFO - iteration 130, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:11:55] {3161} INFO - at 71.5s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:55] {2986} INFO - iteration 131, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:11:55] {3161} INFO - at 71.5s,\testimator extra_tree's best error=0.0055,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:55] {2986} INFO - iteration 132, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:11:55] {3161} INFO - at 71.6s,\testimator extra_tree's best error=0.0055,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:55] {2986} INFO - iteration 133, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:11:55] {3161} INFO - at 71.6s,\testimator extra_tree's best error=0.0055,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:55] {2986} INFO - iteration 134, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:11:55] {3161} INFO - at 71.6s,\testimator extra_tree's best error=0.0051,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:55] {2986} INFO - iteration 135, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:11:55] {3161} INFO - at 71.7s,\testimator extra_tree's best error=0.0051,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:55] {2986} INFO - iteration 136, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:11:55] {3161} INFO - at 71.7s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:55] {2986} INFO - iteration 137, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:11:55] {3161} INFO - at 71.9s,\testimator xgboost's best error=0.0026,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:55] {2986} INFO - iteration 138, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:11:55] {3161} INFO - at 71.9s,\testimator extra_tree's best error=0.0051,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:55] {2986} INFO - iteration 139, current learner arima\n",
+ "[flaml.automl: 07-28 21:11:56] {3161} INFO - at 72.8s,\testimator arima's best error=0.0033,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:56] {2986} INFO - iteration 140, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:11:56] {3161} INFO - at 72.8s,\testimator extra_tree's best error=0.0051,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:56] {2986} INFO - iteration 141, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:11:56] {3161} INFO - at 72.9s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:56] {2986} INFO - iteration 142, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:11:56] {3161} INFO - at 72.9s,\testimator extra_tree's best error=0.0051,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:56] {2986} INFO - iteration 143, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:11:56] {3161} INFO - at 72.9s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:56] {2986} INFO - iteration 144, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:11:57] {3161} INFO - at 73.0s,\testimator extra_tree's best error=0.0051,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:57] {2986} INFO - iteration 145, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:11:57] {3161} INFO - at 73.1s,\testimator xgboost's best error=0.0026,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:11:57] {2986} INFO - iteration 146, current learner prophet\n",
+ "[flaml.automl: 07-28 21:12:00] {3161} INFO - at 76.1s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:00] {2986} INFO - iteration 147, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:12:00] {3161} INFO - at 76.2s,\testimator extra_tree's best error=0.0037,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:00] {2986} INFO - iteration 148, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:12:00] {3161} INFO - at 76.2s,\testimator extra_tree's best error=0.0037,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:00] {2986} INFO - iteration 149, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:12:00] {3161} INFO - at 76.4s,\testimator xgboost's best error=0.0026,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:00] {2986} INFO - iteration 150, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:12:00] {3161} INFO - at 76.5s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:00] {2986} INFO - iteration 151, current learner rf\n",
+ "[flaml.automl: 07-28 21:12:00] {3161} INFO - at 76.5s,\testimator rf's best error=0.0150,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:00] {2986} INFO - iteration 152, current learner rf\n",
+ "[flaml.automl: 07-28 21:12:00] {3161} INFO - at 76.6s,\testimator rf's best error=0.0150,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:00] {2986} INFO - iteration 153, current learner rf\n",
+ "[flaml.automl: 07-28 21:12:00] {3161} INFO - at 76.7s,\testimator rf's best error=0.0096,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:00] {2986} INFO - iteration 154, current learner rf\n",
+ "[flaml.automl: 07-28 21:12:00] {3161} INFO - at 76.7s,\testimator rf's best error=0.0096,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:00] {2986} INFO - iteration 155, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:12:00] {3161} INFO - at 76.8s,\testimator extra_tree's best error=0.0037,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:00] {2986} INFO - iteration 156, current learner rf\n",
+ "[flaml.automl: 07-28 21:12:00] {3161} INFO - at 76.8s,\testimator rf's best error=0.0042,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:00] {2986} INFO - iteration 157, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:12:00] {3161} INFO - at 76.8s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:00] {2986} INFO - iteration 158, current learner rf\n",
+ "[flaml.automl: 07-28 21:12:00] {3161} INFO - at 76.9s,\testimator rf's best error=0.0042,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:00] {2986} INFO - iteration 159, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:12:00] {3161} INFO - at 76.9s,\testimator extra_tree's best error=0.0037,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:00] {2986} INFO - iteration 160, current learner rf\n",
+ "[flaml.automl: 07-28 21:12:01] {3161} INFO - at 77.0s,\testimator rf's best error=0.0042,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:01] {2986} INFO - iteration 161, current learner rf\n",
+ "[flaml.automl: 07-28 21:12:01] {3161} INFO - at 77.0s,\testimator rf's best error=0.0036,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:01] {2986} INFO - iteration 162, current learner rf\n",
+ "[flaml.automl: 07-28 21:12:01] {3161} INFO - at 77.1s,\testimator rf's best error=0.0036,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:01] {2986} INFO - iteration 163, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:12:01] {3161} INFO - at 77.1s,\testimator extra_tree's best error=0.0030,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:01] {2986} INFO - iteration 164, current learner rf\n",
+ "[flaml.automl: 07-28 21:12:01] {3161} INFO - at 77.2s,\testimator rf's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:01] {2986} INFO - iteration 165, current learner rf\n",
+ "[flaml.automl: 07-28 21:12:01] {3161} INFO - at 77.2s,\testimator rf's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:01] {2986} INFO - iteration 166, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:12:01] {3161} INFO - at 77.3s,\testimator extra_tree's best error=0.0027,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:01] {2986} INFO - iteration 167, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:12:01] {3161} INFO - at 77.4s,\testimator extra_tree's best error=0.0027,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:01] {2986} INFO - iteration 168, current learner rf\n",
+ "[flaml.automl: 07-28 21:12:01] {3161} INFO - at 77.4s,\testimator rf's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:01] {2986} INFO - iteration 169, current learner rf\n",
+ "[flaml.automl: 07-28 21:12:01] {3161} INFO - at 77.4s,\testimator rf's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:01] {2986} INFO - iteration 170, current learner rf\n",
+ "[flaml.automl: 07-28 21:12:01] {3161} INFO - at 77.5s,\testimator rf's best error=0.0021,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:01] {2986} INFO - iteration 171, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:12:01] {3161} INFO - at 77.5s,\testimator extra_tree's best error=0.0027,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:01] {2986} INFO - iteration 172, current learner rf\n",
+ "[flaml.automl: 07-28 21:12:01] {3161} INFO - at 77.6s,\testimator rf's best error=0.0021,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:01] {2986} INFO - iteration 173, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:12:01] {3161} INFO - at 77.6s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:01] {2986} INFO - iteration 174, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:12:01] {3161} INFO - at 77.6s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:01] {2986} INFO - iteration 175, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:12:01] {3161} INFO - at 77.7s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:01] {2986} INFO - iteration 176, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:12:01] {3161} INFO - at 77.7s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:01] {2986} INFO - iteration 177, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:12:01] {3161} INFO - at 77.8s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:01] {2986} INFO - iteration 178, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:12:01] {3161} INFO - at 77.8s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:01] {2986} INFO - iteration 179, current learner prophet\n",
+ "[flaml.automl: 07-28 21:12:05] {3161} INFO - at 81.2s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:05] {2986} INFO - iteration 180, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:12:05] {3161} INFO - at 81.3s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:05] {2986} INFO - iteration 181, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:12:05] {3161} INFO - at 81.3s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:05] {2986} INFO - iteration 182, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:12:05] {3161} INFO - at 81.4s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:05] {2986} INFO - iteration 183, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:12:05] {3161} INFO - at 81.4s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:05] {2986} INFO - iteration 184, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:12:05] {3161} INFO - at 81.4s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:05] {2986} INFO - iteration 185, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:12:05] {3161} INFO - at 81.5s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:05] {2986} INFO - iteration 186, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:12:05] {3161} INFO - at 81.5s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:05] {2986} INFO - iteration 187, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:12:05] {3161} INFO - at 81.6s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:05] {2986} INFO - iteration 188, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:12:05] {3161} INFO - at 81.6s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:05] {2986} INFO - iteration 189, current learner rf\n",
+ "[flaml.automl: 07-28 21:12:05] {3161} INFO - at 81.6s,\testimator rf's best error=0.0021,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:05] {2986} INFO - iteration 190, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:12:05] {3161} INFO - at 81.7s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:05] {2986} INFO - iteration 191, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:12:05] {3161} INFO - at 81.7s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:05] {2986} INFO - iteration 192, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:12:05] {3161} INFO - at 81.7s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:05] {2986} INFO - iteration 193, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:12:05] {3161} INFO - at 81.8s,\testimator xgb_limitdepth's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:05] {2986} INFO - iteration 194, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:12:05] {3161} INFO - at 81.8s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:05] {2986} INFO - iteration 195, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:12:05] {3161} INFO - at 81.9s,\testimator xgboost's best error=0.0026,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:05] {2986} INFO - iteration 196, current learner rf\n",
+ "[flaml.automl: 07-28 21:12:05] {3161} INFO - at 81.9s,\testimator rf's best error=0.0021,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:05] {2986} INFO - iteration 197, current learner prophet\n",
+ "[flaml.automl: 07-28 21:12:09] {3161} INFO - at 85.5s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:09] {2986} INFO - iteration 198, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:12:09] {3161} INFO - at 85.5s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:09] {2986} INFO - iteration 199, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:12:09] {3161} INFO - at 85.5s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:09] {2986} INFO - iteration 200, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:12:09] {3161} INFO - at 85.6s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:09] {2986} INFO - iteration 201, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:12:09] {3161} INFO - at 85.6s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:09] {2986} INFO - iteration 202, current learner prophet\n",
+ "[flaml.automl: 07-28 21:12:12] {3161} INFO - at 88.9s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:12] {2986} INFO - iteration 203, current learner prophet\n",
+ "[flaml.automl: 07-28 21:12:16] {3161} INFO - at 92.8s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:16] {2986} INFO - iteration 204, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:12:16] {3161} INFO - at 92.8s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:16] {2986} INFO - iteration 205, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:12:16] {3161} INFO - at 92.9s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:16] {2986} INFO - iteration 206, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:12:17] {3161} INFO - at 93.0s,\testimator xgboost's best error=0.0026,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:17] {2986} INFO - iteration 207, current learner prophet\n",
+ "[flaml.automl: 07-28 21:12:20] {3161} INFO - at 96.0s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:20] {2986} INFO - iteration 208, current learner arima\n",
+ "[flaml.automl: 07-28 21:12:20] {3161} INFO - at 96.3s,\testimator arima's best error=0.0033,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:20] {2986} INFO - iteration 209, current learner rf\n",
+ "[flaml.automl: 07-28 21:12:20] {3161} INFO - at 96.4s,\testimator rf's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:20] {2986} INFO - iteration 210, current learner prophet\n",
+ "[flaml.automl: 07-28 21:12:26] {3161} INFO - at 102.7s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:26] {2986} INFO - iteration 211, current learner rf\n",
+ "[flaml.automl: 07-28 21:12:26] {3161} INFO - at 102.8s,\testimator rf's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:26] {2986} INFO - iteration 212, current learner rf\n",
+ "[flaml.automl: 07-28 21:12:26] {3161} INFO - at 102.9s,\testimator rf's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:26] {2986} INFO - iteration 213, current learner rf\n",
+ "[flaml.automl: 07-28 21:12:26] {3161} INFO - at 103.0s,\testimator rf's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:26] {2986} INFO - iteration 214, current learner rf\n",
+ "[flaml.automl: 07-28 21:12:27] {3161} INFO - at 103.0s,\testimator rf's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:27] {2986} INFO - iteration 215, current learner rf\n",
+ "[flaml.automl: 07-28 21:12:27] {3161} INFO - at 103.1s,\testimator rf's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:27] {2986} INFO - iteration 216, current learner prophet\n",
+ "[flaml.automl: 07-28 21:12:31] {3161} INFO - at 107.5s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:31] {2986} INFO - iteration 217, current learner prophet\n",
+ "[flaml.automl: 07-28 21:12:35] {3161} INFO - at 111.4s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:35] {2986} INFO - iteration 218, current learner rf\n",
+ "[flaml.automl: 07-28 21:12:35] {3161} INFO - at 111.5s,\testimator rf's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:35] {2986} INFO - iteration 219, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:12:35] {3161} INFO - at 111.7s,\testimator sarimax's best error=0.0037,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:35] {2986} INFO - iteration 220, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:12:35] {3161} INFO - at 111.7s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:35] {2986} INFO - iteration 221, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:12:35] {3161} INFO - at 111.8s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:35] {2986} INFO - iteration 222, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:12:35] {3161} INFO - at 111.8s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:35] {2986} INFO - iteration 223, current learner rf\n",
+ "[flaml.automl: 07-28 21:12:35] {3161} INFO - at 111.9s,\testimator rf's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:35] {2986} INFO - iteration 224, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:12:35] {3161} INFO - at 111.9s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:35] {2986} INFO - iteration 225, current learner prophet\n",
+ "[flaml.automl: 07-28 21:12:39] {3161} INFO - at 115.3s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:39] {2986} INFO - iteration 226, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:12:39] {3161} INFO - at 115.4s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:39] {2986} INFO - iteration 227, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:12:39] {3161} INFO - at 115.4s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:39] {2986} INFO - iteration 228, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:12:39] {3161} INFO - at 115.4s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:39] {2986} INFO - iteration 229, current learner rf\n",
+ "[flaml.automl: 07-28 21:12:39] {3161} INFO - at 115.5s,\testimator rf's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:39] {2986} INFO - iteration 230, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:12:39] {3161} INFO - at 115.5s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:39] {2986} INFO - iteration 231, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:12:39] {3161} INFO - at 115.6s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:39] {2986} INFO - iteration 232, current learner rf\n",
+ "[flaml.automl: 07-28 21:12:39] {3161} INFO - at 115.6s,\testimator rf's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:39] {2986} INFO - iteration 233, current learner prophet\n",
+ "[flaml.automl: 07-28 21:12:42] {3161} INFO - at 118.6s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:42] {2986} INFO - iteration 234, current learner rf\n",
+ "[flaml.automl: 07-28 21:12:42] {3161} INFO - at 118.6s,\testimator rf's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:42] {2986} INFO - iteration 235, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:12:42] {3161} INFO - at 118.7s,\testimator xgb_limitdepth's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:42] {2986} INFO - iteration 236, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:12:42] {3161} INFO - at 118.7s,\testimator xgb_limitdepth's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:42] {2986} INFO - iteration 237, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:12:42] {3161} INFO - at 118.7s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:42] {2986} INFO - iteration 238, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:12:42] {3161} INFO - at 118.8s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:42] {2986} INFO - iteration 239, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:12:42] {3161} INFO - at 118.8s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:42] {2986} INFO - iteration 240, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:12:42] {3161} INFO - at 118.8s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:42] {2986} INFO - iteration 241, current learner rf\n",
+ "[flaml.automl: 07-28 21:12:42] {3161} INFO - at 118.9s,\testimator rf's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:42] {2986} INFO - iteration 242, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:12:42] {3161} INFO - at 119.0s,\testimator xgboost's best error=0.0026,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:42] {2986} INFO - iteration 243, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:12:43] {3161} INFO - at 119.0s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:43] {2986} INFO - iteration 244, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:12:43] {3161} INFO - at 119.0s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:43] {2986} INFO - iteration 245, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:12:43] {3161} INFO - at 119.1s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:43] {2986} INFO - iteration 246, current learner rf\n",
+ "[flaml.automl: 07-28 21:12:43] {3161} INFO - at 119.2s,\testimator rf's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:43] {2986} INFO - iteration 247, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:12:43] {3161} INFO - at 119.2s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:43] {2986} INFO - iteration 248, current learner rf\n",
+ "[flaml.automl: 07-28 21:12:43] {3161} INFO - at 119.3s,\testimator rf's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:43] {2986} INFO - iteration 249, current learner prophet\n",
+ "[flaml.automl: 07-28 21:12:46] {3161} INFO - at 122.4s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:46] {2986} INFO - iteration 250, current learner rf\n",
+ "[flaml.automl: 07-28 21:12:46] {3161} INFO - at 122.4s,\testimator rf's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:46] {2986} INFO - iteration 251, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:12:46] {3161} INFO - at 122.5s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:46] {2986} INFO - iteration 252, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:12:46] {3161} INFO - at 122.5s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:46] {2986} INFO - iteration 253, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:12:46] {3161} INFO - at 122.7s,\testimator xgboost's best error=0.0025,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:46] {2986} INFO - iteration 254, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:12:46] {3161} INFO - at 122.7s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:46] {2986} INFO - iteration 255, current learner rf\n",
+ "[flaml.automl: 07-28 21:12:46] {3161} INFO - at 122.8s,\testimator rf's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:46] {2986} INFO - iteration 256, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:12:46] {3161} INFO - at 123.0s,\testimator xgboost's best error=0.0025,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:46] {2986} INFO - iteration 257, current learner prophet\n",
+ "[flaml.automl: 07-28 21:12:50] {3161} INFO - at 126.2s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:50] {2986} INFO - iteration 258, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:12:50] {3161} INFO - at 126.2s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:50] {2986} INFO - iteration 259, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:12:50] {3161} INFO - at 126.4s,\testimator sarimax's best error=0.0037,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:50] {2986} INFO - iteration 260, current learner rf\n",
+ "[flaml.automl: 07-28 21:12:50] {3161} INFO - at 126.4s,\testimator rf's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:50] {2986} INFO - iteration 261, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:12:50] {3161} INFO - at 126.9s,\testimator sarimax's best error=0.0037,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:50] {2986} INFO - iteration 262, current learner rf\n",
+ "[flaml.automl: 07-28 21:12:50] {3161} INFO - at 126.9s,\testimator rf's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:50] {2986} INFO - iteration 263, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:12:50] {3161} INFO - at 127.0s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:50] {2986} INFO - iteration 264, current learner rf\n",
+ "[flaml.automl: 07-28 21:12:51] {3161} INFO - at 127.0s,\testimator rf's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:51] {2986} INFO - iteration 265, current learner rf\n",
+ "[flaml.automl: 07-28 21:12:51] {3161} INFO - at 127.1s,\testimator rf's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:51] {2986} INFO - iteration 266, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:12:51] {3161} INFO - at 127.1s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:51] {2986} INFO - iteration 267, current learner rf\n",
+ "[flaml.automl: 07-28 21:12:51] {3161} INFO - at 127.2s,\testimator rf's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:51] {2986} INFO - iteration 268, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:12:51] {3161} INFO - at 127.2s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:51] {2986} INFO - iteration 269, current learner prophet\n",
+ "[flaml.automl: 07-28 21:12:54] {3161} INFO - at 130.5s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:54] {2986} INFO - iteration 270, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:12:54] {3161} INFO - at 130.6s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:54] {2986} INFO - iteration 271, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:12:54] {3161} INFO - at 130.6s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:54] {2986} INFO - iteration 272, current learner prophet\n",
+ "[flaml.automl: 07-28 21:12:58] {3161} INFO - at 134.1s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:58] {2986} INFO - iteration 273, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:12:58] {3161} INFO - at 134.1s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:58] {2986} INFO - iteration 274, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:12:58] {3161} INFO - at 134.2s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:58] {2986} INFO - iteration 275, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:12:58] {3161} INFO - at 134.3s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:58] {2986} INFO - iteration 276, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:12:58] {3161} INFO - at 134.3s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:58] {2986} INFO - iteration 277, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:12:58] {3161} INFO - at 134.3s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:58] {2986} INFO - iteration 278, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:12:58] {3161} INFO - at 134.4s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:58] {2986} INFO - iteration 279, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:12:58] {3161} INFO - at 134.4s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:58] {2986} INFO - iteration 280, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:12:58] {3161} INFO - at 134.5s,\testimator xgboost's best error=0.0025,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:58] {2986} INFO - iteration 281, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:12:58] {3161} INFO - at 134.6s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:58] {2986} INFO - iteration 282, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:12:58] {3161} INFO - at 134.6s,\testimator sarimax's best error=0.0037,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:58] {2986} INFO - iteration 283, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:12:58] {3161} INFO - at 134.7s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:58] {2986} INFO - iteration 284, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:12:58] {3161} INFO - at 134.7s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:58] {2986} INFO - iteration 285, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:12:58] {3161} INFO - at 134.8s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:58] {2986} INFO - iteration 286, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:12:58] {3161} INFO - at 134.8s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:58] {2986} INFO - iteration 287, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:12:58] {3161} INFO - at 134.8s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:12:58] {2986} INFO - iteration 288, current learner prophet\n",
+ "[flaml.automl: 07-28 21:13:02] {3161} INFO - at 138.2s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:02] {2986} INFO - iteration 289, current learner prophet\n",
+ "[flaml.automl: 07-28 21:13:05] {3161} INFO - at 141.9s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:05] {2986} INFO - iteration 290, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:13:05] {3161} INFO - at 142.0s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:05] {2986} INFO - iteration 291, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:13:06] {3161} INFO - at 142.0s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:06] {2986} INFO - iteration 292, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:13:06] {3161} INFO - at 142.1s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:06] {2986} INFO - iteration 293, current learner prophet\n",
+ "[flaml.automl: 07-28 21:13:08] {3161} INFO - at 144.9s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:08] {2986} INFO - iteration 294, current learner rf\n",
+ "[flaml.automl: 07-28 21:13:09] {3161} INFO - at 145.0s,\testimator rf's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:09] {2986} INFO - iteration 295, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:13:09] {3161} INFO - at 145.0s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:09] {2986} INFO - iteration 296, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:13:09] {3161} INFO - at 145.2s,\testimator xgboost's best error=0.0025,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:09] {2986} INFO - iteration 297, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:13:09] {3161} INFO - at 145.2s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:09] {2986} INFO - iteration 298, current learner rf\n",
+ "[flaml.automl: 07-28 21:13:09] {3161} INFO - at 145.2s,\testimator rf's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:09] {2986} INFO - iteration 299, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:13:09] {3161} INFO - at 145.3s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:09] {2986} INFO - iteration 300, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:13:09] {3161} INFO - at 145.4s,\testimator xgboost's best error=0.0025,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:09] {2986} INFO - iteration 301, current learner rf\n",
+ "[flaml.automl: 07-28 21:13:09] {3161} INFO - at 145.5s,\testimator rf's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:09] {2986} INFO - iteration 302, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:13:09] {3161} INFO - at 145.5s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:09] {2986} INFO - iteration 303, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:13:09] {3161} INFO - at 145.5s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:09] {2986} INFO - iteration 304, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:13:09] {3161} INFO - at 145.6s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:09] {2986} INFO - iteration 305, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:13:09] {3161} INFO - at 145.6s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:09] {2986} INFO - iteration 306, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:13:09] {3161} INFO - at 145.8s,\testimator sarimax's best error=0.0037,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:09] {2986} INFO - iteration 307, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:13:09] {3161} INFO - at 145.8s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:09] {2986} INFO - iteration 308, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:13:09] {3161} INFO - at 145.8s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:09] {2986} INFO - iteration 309, current learner prophet\n",
+ "[flaml.automl: 07-28 21:13:13] {3161} INFO - at 149.0s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:13] {2986} INFO - iteration 310, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:13:13] {3161} INFO - at 149.0s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:13] {2986} INFO - iteration 311, current learner rf\n",
+ "[flaml.automl: 07-28 21:13:13] {3161} INFO - at 149.1s,\testimator rf's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:13] {2986} INFO - iteration 312, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:13:13] {3161} INFO - at 149.2s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:13] {2986} INFO - iteration 313, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:13:13] {3161} INFO - at 149.2s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:13] {2986} INFO - iteration 314, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:13:13] {3161} INFO - at 149.3s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:13] {2986} INFO - iteration 315, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:13:13] {3161} INFO - at 149.3s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:13] {2986} INFO - iteration 316, current learner rf\n",
+ "[flaml.automl: 07-28 21:13:13] {3161} INFO - at 149.4s,\testimator rf's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:13] {2986} INFO - iteration 317, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:13:13] {3161} INFO - at 149.4s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:13] {2986} INFO - iteration 318, current learner prophet\n",
+ "[flaml.automl: 07-28 21:13:16] {3161} INFO - at 152.3s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:16] {2986} INFO - iteration 319, current learner prophet\n",
+ "[flaml.automl: 07-28 21:13:19] {3161} INFO - at 155.3s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:19] {2986} INFO - iteration 320, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:13:19] {3161} INFO - at 155.3s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:19] {2986} INFO - iteration 321, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:13:19] {3161} INFO - at 155.3s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:19] {2986} INFO - iteration 322, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:13:19] {3161} INFO - at 155.4s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:19] {2986} INFO - iteration 323, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:13:19] {3161} INFO - at 155.4s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:19] {2986} INFO - iteration 324, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:13:19] {3161} INFO - at 155.5s,\testimator extra_tree's best error=0.0017,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:19] {2986} INFO - iteration 325, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:13:19] {3161} INFO - at 155.5s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:19] {2986} INFO - iteration 326, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:13:19] {3161} INFO - at 155.5s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:19] {2986} INFO - iteration 327, current learner prophet\n",
+ "[flaml.automl: 07-28 21:13:23] {3161} INFO - at 159.2s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:23] {2986} INFO - iteration 328, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:13:23] {3161} INFO - at 159.2s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:23] {2986} INFO - iteration 329, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:13:23] {3161} INFO - at 159.4s,\testimator xgboost's best error=0.0025,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:23] {2986} INFO - iteration 330, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:13:23] {3161} INFO - at 159.4s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:23] {2986} INFO - iteration 331, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:13:23] {3161} INFO - at 159.4s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:23] {2986} INFO - iteration 332, current learner rf\n",
+ "[flaml.automl: 07-28 21:13:23] {3161} INFO - at 159.5s,\testimator rf's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:23] {2986} INFO - iteration 333, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:13:23] {3161} INFO - at 159.5s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:23] {2986} INFO - iteration 334, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:13:23] {3161} INFO - at 159.5s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:23] {2986} INFO - iteration 335, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:13:23] {3161} INFO - at 159.6s,\testimator xgboost's best error=0.0025,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:23] {2986} INFO - iteration 336, current learner rf\n",
+ "[flaml.automl: 07-28 21:13:23] {3161} INFO - at 159.7s,\testimator rf's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:23] {2986} INFO - iteration 337, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:13:23] {3161} INFO - at 159.8s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:23] {2986} INFO - iteration 338, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:13:23] {3161} INFO - at 159.8s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:23] {2986} INFO - iteration 339, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:13:23] {3161} INFO - at 160.0s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:23] {2986} INFO - iteration 340, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:13:23] {3161} INFO - at 160.0s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:23] {2986} INFO - iteration 341, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:13:24] {3161} INFO - at 160.1s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:24] {2986} INFO - iteration 342, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:13:24] {3161} INFO - at 160.1s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:24] {2986} INFO - iteration 343, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:13:24] {3161} INFO - at 160.1s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:24] {2986} INFO - iteration 344, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:13:24] {3161} INFO - at 160.2s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:24] {2986} INFO - iteration 345, current learner prophet\n",
+ "[flaml.automl: 07-28 21:13:27] {3161} INFO - at 163.1s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:27] {2986} INFO - iteration 346, current learner prophet\n",
+ "[flaml.automl: 07-28 21:13:29] {3161} INFO - at 165.9s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:29] {2986} INFO - iteration 347, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:13:30] {3161} INFO - at 166.0s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:30] {2986} INFO - iteration 348, current learner prophet\n",
+ "[flaml.automl: 07-28 21:13:33] {3161} INFO - at 169.4s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:33] {2986} INFO - iteration 349, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:13:33] {3161} INFO - at 169.7s,\testimator sarimax's best error=0.0031,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:33] {2986} INFO - iteration 350, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:13:34] {3161} INFO - at 170.4s,\testimator sarimax's best error=0.0031,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:34] {2986} INFO - iteration 351, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:13:34] {3161} INFO - at 170.6s,\testimator sarimax's best error=0.0031,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:34] {2986} INFO - iteration 352, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:13:34] {3161} INFO - at 170.7s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:34] {2986} INFO - iteration 353, current learner arima\n",
+ "[flaml.automl: 07-28 21:13:35] {3161} INFO - at 171.3s,\testimator arima's best error=0.0033,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:35] {2986} INFO - iteration 354, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:13:35] {3161} INFO - at 171.7s,\testimator sarimax's best error=0.0031,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:35] {2986} INFO - iteration 355, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:13:35] {3161} INFO - at 171.8s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:35] {2986} INFO - iteration 356, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:13:35] {3161} INFO - at 171.8s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:35] {2986} INFO - iteration 357, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:13:35] {3161} INFO - at 171.8s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:35] {2986} INFO - iteration 358, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:13:35] {3161} INFO - at 171.9s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:35] {2986} INFO - iteration 359, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:13:36] {3161} INFO - at 172.1s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:36] {2986} INFO - iteration 360, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:13:36] {3161} INFO - at 172.1s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:36] {2986} INFO - iteration 361, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:13:36] {3161} INFO - at 172.1s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:36] {2986} INFO - iteration 362, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:13:36] {3161} INFO - at 172.2s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:36] {2986} INFO - iteration 363, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:13:36] {3161} INFO - at 172.2s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:36] {2986} INFO - iteration 364, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:13:36] {3161} INFO - at 172.3s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:36] {2986} INFO - iteration 365, current learner rf\n",
+ "[flaml.automl: 07-28 21:13:36] {3161} INFO - at 172.3s,\testimator rf's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:36] {2986} INFO - iteration 366, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:13:36] {3161} INFO - at 172.3s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:36] {2986} INFO - iteration 367, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:13:37] {3161} INFO - at 173.2s,\testimator sarimax's best error=0.0021,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:37] {2986} INFO - iteration 368, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:13:37] {3161} INFO - at 173.6s,\testimator sarimax's best error=0.0021,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:37] {2986} INFO - iteration 369, current learner rf\n",
+ "[flaml.automl: 07-28 21:13:37] {3161} INFO - at 173.6s,\testimator rf's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:37] {2986} INFO - iteration 370, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:13:39] {3161} INFO - at 175.5s,\testimator sarimax's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:39] {2986} INFO - iteration 371, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:13:39] {3161} INFO - at 175.6s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:39] {2986} INFO - iteration 372, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:13:41] {3161} INFO - at 177.5s,\testimator sarimax's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:41] {2986} INFO - iteration 373, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:13:41] {3161} INFO - at 177.6s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:41] {2986} INFO - iteration 374, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:13:43] {3161} INFO - at 179.2s,\testimator sarimax's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:43] {2986} INFO - iteration 375, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:13:43] {3161} INFO - at 179.4s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:43] {2986} INFO - iteration 376, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:13:43] {3161} INFO - at 179.5s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:43] {2986} INFO - iteration 377, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:13:43] {3161} INFO - at 179.5s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:43] {2986} INFO - iteration 378, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:13:43] {3161} INFO - at 179.5s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:43] {2986} INFO - iteration 379, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:13:43] {3161} INFO - at 179.6s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:43] {2986} INFO - iteration 380, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:13:43] {3161} INFO - at 179.6s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:43] {2986} INFO - iteration 381, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:13:43] {3161} INFO - at 179.7s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:43] {2986} INFO - iteration 382, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:13:43] {3161} INFO - at 179.7s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:43] {2986} INFO - iteration 383, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:13:43] {3161} INFO - at 179.7s,\testimator lgbm's best error=0.0022,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:43] {2986} INFO - iteration 384, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:13:45] {3161} INFO - at 181.5s,\testimator sarimax's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:45] {2986} INFO - iteration 385, current learner prophet\n",
+ "[flaml.automl: 07-28 21:13:48] {3161} INFO - at 184.8s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:48] {2986} INFO - iteration 386, current learner prophet\n",
+ "[flaml.automl: 07-28 21:13:52] {3161} INFO - at 188.3s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:52] {2986} INFO - iteration 387, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:13:52] {3161} INFO - at 188.4s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:52] {2986} INFO - iteration 388, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:13:52] {3161} INFO - at 188.5s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:52] {2986} INFO - iteration 389, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:13:52] {3161} INFO - at 188.5s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:52] {2986} INFO - iteration 390, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:13:53] {3161} INFO - at 189.5s,\testimator sarimax's best error=0.0019,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:53] {2986} INFO - iteration 391, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:13:53] {3161} INFO - at 189.6s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:53] {2986} INFO - iteration 392, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:13:53] {3161} INFO - at 189.7s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:53] {2986} INFO - iteration 393, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:13:53] {3161} INFO - at 189.7s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:53] {2986} INFO - iteration 394, current learner prophet\n",
+ "[flaml.automl: 07-28 21:13:57] {3161} INFO - at 193.0s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:57] {2986} INFO - iteration 395, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:13:57] {3161} INFO - at 193.1s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:57] {2986} INFO - iteration 396, current learner rf\n",
+ "[flaml.automl: 07-28 21:13:57] {3161} INFO - at 193.2s,\testimator rf's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:57] {2986} INFO - iteration 397, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:13:57] {3161} INFO - at 193.2s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:57] {2986} INFO - iteration 398, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:13:57] {3161} INFO - at 193.3s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:57] {2986} INFO - iteration 399, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:13:57] {3161} INFO - at 193.3s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:57] {2986} INFO - iteration 400, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:13:57] {3161} INFO - at 193.4s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:57] {2986} INFO - iteration 401, current learner rf\n",
+ "[flaml.automl: 07-28 21:13:57] {3161} INFO - at 193.4s,\testimator rf's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:57] {2986} INFO - iteration 402, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:13:57] {3161} INFO - at 193.5s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:57] {2986} INFO - iteration 403, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:13:57] {3161} INFO - at 193.5s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:57] {2986} INFO - iteration 404, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:13:57] {3161} INFO - at 193.6s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:57] {2986} INFO - iteration 405, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:13:57] {3161} INFO - at 193.6s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:57] {2986} INFO - iteration 406, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:13:57] {3161} INFO - at 193.7s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:57] {2986} INFO - iteration 407, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:13:57] {3161} INFO - at 193.7s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:13:57] {2986} INFO - iteration 408, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:14:01] {3161} INFO - at 197.0s,\testimator sarimax's best error=0.0012,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:14:01] {2986} INFO - iteration 409, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:14:02] {3161} INFO - at 198.8s,\testimator sarimax's best error=0.0012,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:14:02] {2986} INFO - iteration 410, current learner rf\n",
+ "[flaml.automl: 07-28 21:14:02] {3161} INFO - at 198.9s,\testimator rf's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:14:02] {2986} INFO - iteration 411, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:14:05] {3161} INFO - at 201.0s,\testimator sarimax's best error=0.0012,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:14:05] {2986} INFO - iteration 412, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:14:05] {3161} INFO - at 201.1s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:14:05] {2986} INFO - iteration 413, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:14:06] {3161} INFO - at 202.3s,\testimator sarimax's best error=0.0012,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:14:06] {2986} INFO - iteration 414, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:14:06] {3161} INFO - at 202.3s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:14:06] {2986} INFO - iteration 415, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:14:06] {3161} INFO - at 202.4s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:14:06] {2986} INFO - iteration 416, current learner rf\n",
+ "[flaml.automl: 07-28 21:14:06] {3161} INFO - at 202.4s,\testimator rf's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:14:06] {2986} INFO - iteration 417, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:14:06] {3161} INFO - at 202.5s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:14:06] {2986} INFO - iteration 418, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:14:06] {3161} INFO - at 202.6s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:14:06] {2986} INFO - iteration 419, current learner prophet\n",
+ "[flaml.automl: 07-28 21:14:09] {3161} INFO - at 205.7s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:14:09] {2986} INFO - iteration 420, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:14:11] {3161} INFO - at 207.4s,\testimator sarimax's best error=0.0012,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:14:11] {2986} INFO - iteration 421, current learner arima\n",
+ "[flaml.automl: 07-28 21:14:11] {3161} INFO - at 207.6s,\testimator arima's best error=0.0033,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:14:11] {2986} INFO - iteration 422, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:14:12] {3161} INFO - at 208.6s,\testimator sarimax's best error=0.0010,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:14:12] {2986} INFO - iteration 423, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:14:12] {3161} INFO - at 208.6s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:14:12] {2986} INFO - iteration 424, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:14:13] {3161} INFO - at 209.9s,\testimator sarimax's best error=0.0007,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:14:13] {2986} INFO - iteration 425, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:14:15] {3161} INFO - at 211.3s,\testimator sarimax's best error=0.0007,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:14:15] {2986} INFO - iteration 426, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:14:15] {3161} INFO - at 211.8s,\testimator sarimax's best error=0.0007,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:14:15] {2986} INFO - iteration 427, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:14:18] {3161} INFO - at 214.2s,\testimator sarimax's best error=0.0007,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:14:18] {2986} INFO - iteration 428, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:14:18] {3161} INFO - at 214.2s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:14:18] {2986} INFO - iteration 429, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:14:18] {3161} INFO - at 214.4s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:14:18] {2986} INFO - iteration 430, current learner rf\n",
+ "[flaml.automl: 07-28 21:14:18] {3161} INFO - at 214.4s,\testimator rf's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:14:18] {2986} INFO - iteration 431, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:14:18] {3161} INFO - at 214.5s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:14:18] {2986} INFO - iteration 432, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:14:20] {3161} INFO - at 216.7s,\testimator sarimax's best error=0.0007,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:14:20] {2986} INFO - iteration 433, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:14:20] {3161} INFO - at 216.8s,\testimator xgboost's best error=0.0024,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:14:20] {2986} INFO - iteration 434, current learner rf\n",
+ "[flaml.automl: 07-28 21:14:20] {3161} INFO - at 216.9s,\testimator rf's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:14:20] {2986} INFO - iteration 435, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:14:21] {3161} INFO - at 217.4s,\testimator sarimax's best error=0.0007,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:14:21] {2986} INFO - iteration 436, current learner prophet\n",
+ "[flaml.automl: 07-28 21:14:24] {3161} INFO - at 220.1s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:14:24] {2986} INFO - iteration 437, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:14:24] {3161} INFO - at 220.6s,\testimator sarimax's best error=0.0007,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:14:24] {2986} INFO - iteration 438, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:14:26] {3161} INFO - at 223.0s,\testimator sarimax's best error=0.0007,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:14:26] {2986} INFO - iteration 439, current learner prophet\n",
+ "[flaml.automl: 07-28 21:14:30] {3161} INFO - at 226.0s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:14:30] {2986} INFO - iteration 440, current learner prophet\n",
+ "[flaml.automl: 07-28 21:14:33] {3161} INFO - at 229.2s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:14:33] {2986} INFO - iteration 441, current learner prophet\n",
+ "[flaml.automl: 07-28 21:14:36] {3161} INFO - at 232.6s,\testimator prophet's best error=0.0005,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:14:36] {2986} INFO - iteration 442, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:14:38] {3161} INFO - at 234.4s,\testimator sarimax's best error=0.0007,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:14:38] {2986} INFO - iteration 443, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:14:38] {3161} INFO - at 234.4s,\testimator extra_tree's best error=0.0016,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:14:38] {2986} INFO - iteration 444, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:14:39] {3161} INFO - at 235.1s,\testimator sarimax's best error=0.0007,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:14:39] {2986} INFO - iteration 445, current learner rf\n",
+ "[flaml.automl: 07-28 21:14:39] {3161} INFO - at 235.1s,\testimator rf's best error=0.0018,\tbest estimator prophet's best error=0.0005\n",
+ "[flaml.automl: 07-28 21:14:39] {2986} INFO - iteration 446, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:14:41] {3161} INFO - at 237.4s,\testimator sarimax's best error=0.0004,\tbest estimator sarimax's best error=0.0004\n",
+ "[flaml.automl: 07-28 21:14:41] {2986} INFO - iteration 447, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:14:41] {3161} INFO - at 237.5s,\testimator xgboost's best error=0.0024,\tbest estimator sarimax's best error=0.0004\n",
+ "[flaml.automl: 07-28 21:14:41] {2986} INFO - iteration 448, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:14:43] {3161} INFO - at 239.7s,\testimator sarimax's best error=0.0004,\tbest estimator sarimax's best error=0.0004\n",
+ "[flaml.automl: 07-28 21:14:43] {2986} INFO - iteration 449, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:14:43] {3161} INFO - at 239.7s,\testimator lgbm's best error=0.0022,\tbest estimator sarimax's best error=0.0004\n",
+ "[flaml.automl: 07-28 21:14:43] {2986} INFO - iteration 450, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:14:43] {3161} INFO - at 239.8s,\testimator extra_tree's best error=0.0016,\tbest estimator sarimax's best error=0.0004\n",
+ "[flaml.automl: 07-28 21:14:43] {2986} INFO - iteration 451, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:14:43] {3161} INFO - at 239.8s,\testimator lgbm's best error=0.0022,\tbest estimator sarimax's best error=0.0004\n",
+ "[flaml.automl: 07-28 21:14:43] {2986} INFO - iteration 452, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:14:43] {3161} INFO - at 239.8s,\testimator xgb_limitdepth's best error=0.0018,\tbest estimator sarimax's best error=0.0004\n",
+ "[flaml.automl: 07-28 21:14:43] {2986} INFO - iteration 453, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:14:43] {3161} INFO - at 239.9s,\testimator extra_tree's best error=0.0016,\tbest estimator sarimax's best error=0.0004\n",
+ "[flaml.automl: 07-28 21:14:43] {2986} INFO - iteration 454, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:14:43] {3161} INFO - at 239.9s,\testimator extra_tree's best error=0.0016,\tbest estimator sarimax's best error=0.0004\n",
+ "[flaml.automl: 07-28 21:14:43] {2986} INFO - iteration 455, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:14:43] {3161} INFO - at 239.9s,\testimator extra_tree's best error=0.0016,\tbest estimator sarimax's best error=0.0004\n",
+ "[flaml.automl: 07-28 21:14:43] {2986} INFO - iteration 456, current learner rf\n",
+ "[flaml.automl: 07-28 21:14:44] {3161} INFO - at 240.0s,\testimator rf's best error=0.0018,\tbest estimator sarimax's best error=0.0004\n",
+ "[flaml.automl: 07-28 21:14:44] {3425} INFO - retrain sarimax for 0.7s\n",
+ "[flaml.automl: 07-28 21:14:44] {3432} INFO - retrained model: \n",
+ "[flaml.automl: 07-28 21:14:44] {2725} INFO - fit succeeded\n",
+ "[flaml.automl: 07-28 21:14:44] {2726} INFO - Time taken to find the best model: 237.36335611343384\n",
+ "[flaml.automl: 07-28 21:14:44] {2737} WARNING - Time taken to find the best model is 99% of the provided time budget and not all estimators' hyperparameter search converged. Consider increasing the time budget.\n"
]
}
],
@@ -1366,7 +1094,7 @@
},
{
"cell_type": "code",
- "execution_count": 7,
+ "execution_count": 8,
"metadata": {},
"outputs": [
{
@@ -1374,9 +1102,9 @@
"output_type": "stream",
"text": [
"Best ML leaner: sarimax\n",
- "Best hyperparmeter config: {'p': 8.0, 'd': 0.0, 'q': 8.0, 'P': 6.0, 'D': 3.0, 'Q': 1.0, 's': 6}\n",
+ "Best hyperparmeter config: {'p': 8, 'd': 0, 'q': 8, 'P': 6, 'D': 3, 'Q': 1, 's': 6}\n",
"Best mape on validation data: 0.00043466573064228554\n",
- "Training duration of best run: 0.6672513484954834s\n"
+ "Training duration of best run: 0.7340686321258545s\n"
]
}
],
@@ -1390,16 +1118,16 @@
},
{
"cell_type": "code",
- "execution_count": 8,
+ "execution_count": 9,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- ""
+ ""
]
},
- "execution_count": 8,
+ "execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
@@ -1410,7 +1138,7 @@
},
{
"cell_type": "code",
- "execution_count": 9,
+ "execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
@@ -1422,7 +1150,7 @@
},
{
"cell_type": "code",
- "execution_count": 10,
+ "execution_count": 11,
"metadata": {},
"outputs": [
{
@@ -1469,7 +1197,7 @@
},
{
"cell_type": "code",
- "execution_count": 11,
+ "execution_count": 12,
"metadata": {},
"outputs": [
{
@@ -1495,7 +1223,7 @@
},
{
"cell_type": "code",
- "execution_count": 12,
+ "execution_count": 13,
"metadata": {},
"outputs": [
{
@@ -1509,12 +1237,10 @@
"{'Current Learner': 'lgbm', 'Current Sample': 502, 'Current Hyper-parameters': {'n_estimators': 4, 'num_leaves': 8, 'min_child_samples': 11, 'learning_rate': 0.8116893577982964, 'log_max_bin': 8, 'colsample_bytree': 0.97502360023323, 'reg_alpha': 0.0012398377555843262, 'reg_lambda': 0.02776044509327881, 'optimize_for_horizon': False, 'lags': 4}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 4, 'num_leaves': 8, 'min_child_samples': 11, 'learning_rate': 0.8116893577982964, 'log_max_bin': 8, 'colsample_bytree': 0.97502360023323, 'reg_alpha': 0.0012398377555843262, 'reg_lambda': 0.02776044509327881, 'optimize_for_horizon': False, 'lags': 4}}\n",
"{'Current Learner': 'lgbm', 'Current Sample': 502, 'Current Hyper-parameters': {'n_estimators': 5, 'num_leaves': 16, 'min_child_samples': 7, 'learning_rate': 1.0, 'log_max_bin': 9, 'colsample_bytree': 0.9289697965752838, 'reg_alpha': 0.01291354098023607, 'reg_lambda': 0.012402833825431305, 'optimize_for_horizon': False, 'lags': 5}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 5, 'num_leaves': 16, 'min_child_samples': 7, 'learning_rate': 1.0, 'log_max_bin': 9, 'colsample_bytree': 0.9289697965752838, 'reg_alpha': 0.01291354098023607, 'reg_lambda': 0.012402833825431305, 'optimize_for_horizon': False, 'lags': 5}}\n",
"{'Current Learner': 'lgbm', 'Current Sample': 502, 'Current Hyper-parameters': {'n_estimators': 10, 'num_leaves': 13, 'min_child_samples': 8, 'learning_rate': 1.0, 'log_max_bin': 9, 'colsample_bytree': 0.915047969012756, 'reg_alpha': 0.1456985407754094, 'reg_lambda': 0.010186415963233664, 'optimize_for_horizon': False, 'lags': 9}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 10, 'num_leaves': 13, 'min_child_samples': 8, 'learning_rate': 1.0, 'log_max_bin': 9, 'colsample_bytree': 0.915047969012756, 'reg_alpha': 0.1456985407754094, 'reg_lambda': 0.010186415963233664, 'optimize_for_horizon': False, 'lags': 9}}\n",
- "{'Current Learner': 'rf', 'Current Sample': 502, 'Current Hyper-parameters': {'n_estimators': 4, 'max_features': 0.7336821866058406, 'max_leaves': 37, 'optimize_for_horizon': False, 'lags': 10}, 'Best Learner': 'rf', 'Best Hyper-parameters': {'n_estimators': 4, 'max_features': 0.7336821866058406, 'max_leaves': 37, 'optimize_for_horizon': False, 'lags': 10}}\n",
- "{'Current Learner': 'rf', 'Current Sample': 502, 'Current Hyper-parameters': {'n_estimators': 4, 'max_features': 0.776140805521135, 'max_leaves': 71, 'optimize_for_horizon': False, 'lags': 10}, 'Best Learner': 'rf', 'Best Hyper-parameters': {'n_estimators': 4, 'max_features': 0.776140805521135, 'max_leaves': 71, 'optimize_for_horizon': False, 'lags': 10}}\n",
+ "{'Current Learner': 'xgb', 'Current Sample': 502, 'Current Hyper-parameters': {'n_estimators': 17, 'max_depth': 6, 'min_child_weight': 1.1257301179325647, 'learning_rate': 0.3420575416463879, 'subsample': 1.0, 'colsample_bylevel': 0.8634518942394397, 'colsample_bytree': 0.8183410599521093, 'reg_alpha': 0.0031517221935712125, 'reg_lambda': 0.36563645650488746, 'optimize_for_horizon': False, 'lags': 1}, 'Best Learner': 'xgb', 'Best Hyper-parameters': {'n_estimators': 17, 'max_depth': 6, 'min_child_weight': 1.1257301179325647, 'learning_rate': 0.3420575416463879, 'subsample': 1.0, 'colsample_bylevel': 0.8634518942394397, 'colsample_bytree': 0.8183410599521093, 'reg_alpha': 0.0031517221935712125, 'reg_lambda': 0.36563645650488746, 'optimize_for_horizon': False, 'lags': 1}}\n",
"{'Current Learner': 'prophet', 'Current Sample': 502, 'Current Hyper-parameters': {'changepoint_prior_scale': 0.05, 'seasonality_prior_scale': 10.0, 'holidays_prior_scale': 10.0, 'seasonality_mode': 'multiplicative'}, 'Best Learner': 'prophet', 'Best Hyper-parameters': {'changepoint_prior_scale': 0.05, 'seasonality_prior_scale': 10.0, 'holidays_prior_scale': 10.0, 'seasonality_mode': 'multiplicative'}}\n",
"{'Current Learner': 'prophet', 'Current Sample': 502, 'Current Hyper-parameters': {'changepoint_prior_scale': 0.02574943279263944, 'seasonality_prior_scale': 10.0, 'holidays_prior_scale': 10.0, 'seasonality_mode': 'additive'}, 'Best Learner': 'prophet', 'Best Hyper-parameters': {'changepoint_prior_scale': 0.02574943279263944, 'seasonality_prior_scale': 10.0, 'holidays_prior_scale': 10.0, 'seasonality_mode': 'additive'}}\n",
- "{'Current Learner': 'prophet', 'Current Sample': 502, 'Current Hyper-parameters': {'changepoint_prior_scale': 0.029044518309983725, 'seasonality_prior_scale': 10.0, 'holidays_prior_scale': 8.831739687246309, 'seasonality_mode': 'additive'}, 'Best Learner': 'prophet', 'Best Hyper-parameters': {'changepoint_prior_scale': 0.029044518309983725, 'seasonality_prior_scale': 10.0, 'holidays_prior_scale': 8.831739687246309, 'seasonality_mode': 'additive'}}\n",
- "{'Current Learner': 'prophet', 'Current Sample': 502, 'Current Hyper-parameters': {'changepoint_prior_scale': 0.02907295015483903, 'seasonality_prior_scale': 10.0, 'holidays_prior_scale': 10.0, 'seasonality_mode': 'additive'}, 'Best Learner': 'prophet', 'Best Hyper-parameters': {'changepoint_prior_scale': 0.02907295015483903, 'seasonality_prior_scale': 10.0, 'holidays_prior_scale': 10.0, 'seasonality_mode': 'additive'}}\n"
+ "{'Current Learner': 'prophet', 'Current Sample': 502, 'Current Hyper-parameters': {'changepoint_prior_scale': 0.029044518309983725, 'seasonality_prior_scale': 10.0, 'holidays_prior_scale': 8.831739687246309, 'seasonality_mode': 'additive'}, 'Best Learner': 'prophet', 'Best Hyper-parameters': {'changepoint_prior_scale': 0.029044518309983725, 'seasonality_prior_scale': 10.0, 'holidays_prior_scale': 8.831739687246309, 'seasonality_mode': 'additive'}}\n"
]
}
],
@@ -1529,12 +1255,12 @@
},
{
"cell_type": "code",
- "execution_count": 13,
+ "execution_count": 14,
"metadata": {},
"outputs": [
{
"data": {
-      "image/png": "<base64-encoded PNG of the matplotlib figure omitted>",
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYgAAAEWCAYAAAB8LwAVAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjAsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8GearUAAAgAElEQVR4nO3de5xdVX338c+XMYGAwAQJFCZAUGMkAhKdBhFvoDaAKCFSBR4vjXJrhVJtY4HWW30osalWfKTmiZQqlpsgidEnEikoqQgkgxNyI2ljQJgJhaEYgjCSZPJ7/thr4OSwZ2YnzJ4zc873/XrNa85ee529f5sh53fWWnuvpYjAzMys2m61DsDMzIYnJwgzM8vlBGFmZrmcIMzMLJcThJmZ5XKCMDOzXE4QZrtA0tslrat1HGZlcoKwEUfSw5LeU8sYIuI/ImJSWceXNE3SEknPSOqSdJekD5R1PrM8ThBmOSQ11fDcZwA3A9cC44EDgc8D79+FY0mS/53bLvH/OFY3JO0m6RJJv5b0P5K+L2m/iv03S/pvSU+nb+dvqNj3HUnfkrRI0rPACaml8leSVqT33CRpj1T/XZI6Kt7fZ920/7OSHpO0UdI5kkLSa3OuQcDXgC9HxNUR8XREbI+IuyLi3FTni5L+reI9E9LxXpG2fy7pckl3A88Bl0lqqzrPpyUtTK93l/SPkh6R9LikuZLGvMw/h9UBJwirJ38OTAfeCRwM/Ba4qmL/T4CJwAHAr4Drqt5/NnA5sDfwi1T2IeAk4HDgaOBP+jl/bl1JJwGfAd4DvDbF15dJwCHALf3UKeKjwHlk1/J/gEmSJlbsPxu4Pr3+CvA64JgUXwtZi8UanBOE1ZPzgb+JiI6IeB74InBG7zfriLgmIp6p2PdGSftWvP+HEXF3+sb++1T2jYjYGBFPAT8i+xDtS191PwT8a0SsjojngC/1c4xXpd+PFb7qfN9J59sWEU8DPwTOAkiJ4vXAwtRiORf4dEQ8FRHPAH8PnPkyz291wAnC6slhwHxJmyRtAh4EeoADJTVJmp26nzYDD6f37F/x/kdzjvnfFa+fA17Zz/n7qntw1bHzztPrf9Lvg/qpU0T1Oa4nJQiy1sOClKzGAXsC91f8d7stlVuDc4KwevIocHJENFf87BERnWQfiqeRdfPsC0xI71HF+8ua2vgxssHmXof0U3cd2XV8sJ86z5J9qPf6g5w61dfyU2B/SceQJYre7qUngW7gDRX/zfaNiP4SoTUIJwgbqUZJ2qPi5xXAXOBySYcBSBon6bRUf2/gebJv6HuSdaMMle8DMyUdIWlP+unfj2z+/c8An5M0U9I+afD9bZLmpWrLgXdIOjR1kV06UAARsY1sXGMOsB9weyrfDnwb+CdJBwBIapE0bZev1uqGE4SNVIvIvvn2/nwRuBJYCPxU0jPAvcCxqf61wG+ATmBN2jckIuInwDeAnwHrgXvSruf7qH8L8GHgE8BG4HHgf5ONIxARtwM3ASuA+4EfFwzlerIW1M0pYfT66xTXvan77d/JBsutwckLBpkNLUlHAKuA3as+qM2GFbcgzIaApNMljZY0luy20h85Odhw5wRhNjTOB7qAX5PdWfWntQ3HbGDuYjIzs1xuQZiZWa5X1DqAwbT//vvHhAkTah2GmdmIcf/99z8ZEbkPRtZVgpgwYQJtbW0DVzQzMwAk/aavfe5iMjOzXE4QZmaWywnCzMxyOUGYmVmu0hKEpGskPSFpVR/7JekbktanVbjeVLHvJEnr0r5LyorRzMz6VuZdTN8Bvkk2SVqek8lW95pINqHat4Bj01rAVwHvBTqAZZIWRsSaEmM1qzsL2juZs3gdGzd1c3DzGGZNm8T0KS21DssGUdl/49ISREQskTShnyqnAdem6Y3vldQs6SCyefrXR8QGAEk3prpOEDZo6v3Dc0F7J5feupLurT0AdG7q5tJbVwLU1XU2sqH4G9fyOYgWdlz1qiOV5ZUfSx8knUe29i6HHnro4EdpdacRPjznLF73wvX16t7aw2dvWcENSx+pUVQ2mNof2cSWnu07lHVv7WHO4nV1kSCUUxb9lOeKiHnAPIDW1lZPLGUDaoQPz85N3bnl1R8oNnL19bfc2MffflfUMkF0sOPSi+PJFkcZ3Ue52aDo6x9QPX14jm7aLfd6WprHcNP5x9UgIhtsx8++M/eLwMHNYwbtHLVMEAuBC9MYw7HA0xHxmKQuYKKkw8lW/zqTbD3hmqv3futGcXDzmNx/WPX04VndjQYwZlQTs6Z5obh6MWvapNL/xqUlCEk3AO8iWyi9A/gCMAogIuaSLRl5CtlSh88BM9O+bZIuBBYDTcA1EbG6rDgr9ZcAGqHfulEMxT+sWuv9f9JfaOrXUPyN62o9iNbW1tjVyfr+dsFKrrv3kR0GO8aMauKKGUcxfUpLn8250U27MeXQ5l2M2Grlyd89z4auZwmyloM/PK1RSbo/Ilrz9tXVbK67akF750uSA+w4cOlBv/qy/yt3Z/9X7s5px7Rw9rG++80sjxMEWROtr3ZUbwLwoJ+ZNRrPxUTftwTCiwngH844mjGjmnbYV2/91mZmldyCAJokevoYi+lNAB70M7NG4wQBfSYH2PEOpelTWpwQzKxhuIuJrBtpZ8rNzBqBEwRZN5LHF8zMduQuJl7sRvrsLSvY0rPd98WbmeEWBPDiE9RberYzumk3JwczM9yCeMkUGlt6tnsKDTMz3ILoc+rnOYvX1SgiM7PhoeETRF9TPw/mnOpmZiNRwyeIvuZOH8w51c3MRqKGTxC+xdXMLF/DD1L7Flczs3wNnyAgSxK9axF7ZlYzs0zDdzGZmVk+JwgzM8vlBGFmZrmcIMzMLJcThJmZ5XKCMDOzXE4QZmaWq9QEIekkSeskrZd0Sc7+sZLmS1ohaamkIyv2XSxplaTVkv6izDjNzOylSksQkpqAq4CTgcnAWZImV1W7DFgeEUcDHwOuTO89EjgXmAq8EThV0sSyYjUzs5cqswUxFVgfERsiYgtwI3BaVZ3JwB0AEbEWmCDpQOAI4N6IeC4itgF3AaeXGKuZmVUpM0G0AI9WbHekskoPADMAJE0FDgPGA6uAd0h6laQ9gVOAQ/JOIuk8SW2S2rq6ugb5EszMGleZCUI5ZVG1PRsYK2k5cBHQDmyLiAeBrwC3A7eRJZJteSeJiHkR0RoRrePGjRu04M3MGl2Zk/V1sOO3/vHAxsoKEbEZmAkgScBD6YeI+BfgX9K+v0/HMzOzIVJmC2IZMFHS4ZJGA2cCCysrSGpO+wDOAZakpIGkA9LvQ8m6oW4oMVYzM6tSWgsiIrZJuhBYDDQB10TEakkXpP1zyQajr5XUA6wBPllxiB9IehWwFfhURPy2rFjNzOylSl0PIiIWAYuqyuZWvL4HyL19NSLeXmZsZmbWPz9JbWZmuZwgzMwslxOEmZnlcoIwM7NcThBmZpbLCcLMzHI5QZiZWS4nCDMzy+UEYWZmuZwgzMwslxOEmZnlcoIwM7NcThBmZpb
LCcLMzHINmCAk7TcUgZiZ2fBSpAVxn6SbJZ2SlgWtOwvaO2l/ZBP3PfQUx8++kwXtnbUOycys5ookiNcB84CPAusl/b2k15Ub1tBZ0N7JpbeuZEvPdgA6N3Vz6a0rnSTMrOENmCAic3tEnEW2bvTHgaWS7pJ0XOkRlmzO4nV0b+3Zoax7aw9zFq+rUURmZsPDgEuOpnWhP0LWgngcuAhYCBwD3AwcXmaAZdu4qXunys3MGkWRNanvAb4HTI+IjoryNklz+3jPiHFw8xg6c5LBwc1jahCNmdnwUWQMYlJEfLkqOQAQEV8pIaYhNWvaJMaMatqhbMyoJmZNm1SjiMzMhociCeKnkpp7NySNlbS4xJiG1PQpLVwx4yhGN2X/KVqax3DFjKOYPqWlxpGZmdVWkS6mcRGxqXcjIn4r6YASYxpy06e0cMPSRwC46fwRP+5uZjYoirQgeiQd2rsh6TAgihxc0kmS1klaL+mSnP1jJc2XtELSUklHVuz7tKTVklZJukHSHkXOaWZmg6NIgvgb4BeSvifpe8AS4NKB3iSpCbgKOBmYDJwlaXJVtcuA5RFxNPAx4Mr03hbgz4HWiDgSaALOLHZJZmY2GAbsYoqI2yS9CXgLIODTEfFkgWNPBdZHxAYASTcCpwFrKupMBq5I51kraYKkAytiGyNpK7AnsLHgNZmZ2SAoOllfD/AE8DQwWdI7CrynBXi0YrsjlVV6AJgBIGkqcBgwPiI6gX8EHgEeA56OiJ/mnUTSeZLaJLV1dXUVvBwzMxtIkcn6ziHrVloMfCn9/mKBY+fN21Q9djEbGCtpOdkDeO3ANkljyVobhwMHA3tJ+kjeSSJiXkS0RkTruHHjCoRlZmZFFGlBXAz8IfCbiDgBmAIU+areARxSsT2eqm6iiNgcETMj4hiyMYhxwEPAe4CHIqIrIrYCtwJvLXBOMzMbJEUSxO8j4vcAknaPiLVAkafIlgETJR0uaTTZIPPCygqSmtM+yOZ5WhIRm8m6lt4iac80g+y7gQeLXZKZmQ2GIs9BdKQH5RYAt0v6LQUGjCNim6QLybqkmoBrImK1pAvS/rnAEcC1knrIBq8/mfbdJ+kW4FfANrKup3k7fXVmZrbLitzFdHp6+UVJPwP2BW4rcvCIWAQsqiqbW/H6HmBiH+/9AvCFIucxM7PB12+CkLQbsCI9i0BE3DUkUZmZWc31OwYREduBByqfpDYzs8ZQZAziIGC1pKXAs72FEfGB0qIyM7OaK5IgvlR6FGZmNuwUGaT2uIOZWQMqsuToM7z4BPRoYBTwbETsU2ZgZmZWW0VaEHtXbkuaTjYRn5mZ1bGik/W9ICIWACeWEIuZmQ0jRbqYZlRs7ga0UnDBIDMzG7mK3MX0/orX24CHyWZaNTOzOlZkDGLmUARiZmbDS5H1IL6bJuvr3R4r6ZpywzIzs1orMkh9dERs6t2IiN+SrQlhZmZ1rEiC2C2t8AaApP0oNnZhZmYjWJEP+q8Cv0zrMwTwIeDyUqMyM7OaKzJIfa2kNrJnHwTMiIg1pUdmZmY1VeQ5iLcAqyPim2l7b0nHRsR9pUc3BBa0dzJn8To6N3Uzumk3FrR3Mn1KS63DMjOruSJjEN8Cflex/WwqG/EWtHdy6a0r6dzUDcCWnu1ceutKFrR31jgyM7PaK5IgFBEvPDmdFhGqi0HqOYvX0b21Z4ey7q09zFm8rkYRmZkNH0USxAZJfy5pVPq5GNhQdmBDYWNqORQtNzNrJEUSxAXAW4FOoAM4Fji3zKCGysHNY3aq3MyskQyYICLiiYg4MyIOiIgDgU8C7yo9siEwa9okxoxq2qFszKgmZk2bVKOIzMyGj0LTfUtqknSypGuBh4APlxvW0Jg+pYUrZhzF6KbsP0NL8xiumHGU72IyM2OAwWZJ7wDOBt4HLAWOB14dEc8VObikk4ArgSbg6oiYXbV/LHAN8Brg98AnImKVpEnATRVVXw18PiK+XuiqdsL0KS3csPQRAG46/7jBPryZ2YjVZ4KQ1AE8QnZL66yIeEbSQzuRHJqAq4D3ko1dLJO0sOohu8uA5RFxuqTXp/rvjoh1wDEVx+kE5u/85ZmZ2a7qr4vpB0ALWXfS+yXtxc4tFDQVWB8RGyJiC3AjL11HYjJwB0BErAUmSDqwqs67gV9HxG924txmZvYy9ZkgIuJiYALwNeAE4D+BcZI+JOmVBY7dAjxasd2Ryio9AMwAkDQVOAwYX1XnTOCGvk4i6TxJbZLaurq6CoRlZmZF9DtIHZk7I+JcsmRxNjCdbFW5gSjvkFXbs4GxkpYDFwHtZKvWZQeQRgMfAG7uJ8Z5EdEaEa3jxo0rEJaZmRVR+InoiNgK/Aj4kaQiDwp0AIdUbI8HNlYdczMwE0CSyO6QeqiiysnAryLi8aJxmpnZ4Ch0m2u1iCjyqPEyYKKkw1NL4ExgYWUFSc1pH8A5wJKUNHqdRT/dS2ZmVp7S5lSKiG2SLgQWk93mek1ErJZ0Qdo/FzgCuFZSD7CG7CE8ACTtSXYH1PllxWhmZn0rddK9iFgELKoqm1vx+h5gYh/vfQ54VZnxmZlZ34qsB/E6YBbZHUYv1I+IE0uMy8zMaqxIC+JmYC7wbaBngLpmZlYniiSIbRFRFwsEmZlZcUXuYvqRpD+TdJCk/Xp/So/MzMxqqkgL4uPp96yKsiCbQM/MzOrUgAkiIg4fikDMzGx4KXIX0yjgT4F3pKKfA/83PVltZmZ1qkgX07eAUcA/p+2PprJzygrKzMxqr0iC+MOIeGPF9p2SHigrIDMzGx6K3MXUI+k1vRuSXo2fhzAzq3tFWhCzgJ9J2kA2hfdhpBlYzcysfhW5i+kOSROBSWQJYm1EPF96ZGZmVlP9rUl9YkTcKWlG1a7XSCIibi05NjMzq6H+WhDvBO4E3p+zLwAnCDOzOtZngoiIL6SXfxcRlau8IckPz5mZ1bkidzH9IKfslsEOxMzMhpf+xiBeD7wB2LdqHGIfYI+yAzMzs9rqbwxiEnAq0MyO4xDPAOeWGZSZmdVef2MQPwR+KOm4tDSomZk1kCIPyrVL+hRZd9MLXUsR8YnSojIzs5orMkj9PeAPgGnAXcB4sm4mMzOrY0USxGsj4nPAsxHxXeB9wFHlhmVmZrVWJEH0rvuwSdKRwL7AhNIiMjOzYaFIgpgnaSzwOWAhsAb4hyIHl3SSpHWS1ku6JGf/WEnzJa2QtDQloN59zZJukbRW0oOSjit4TWZmNgiKTNZ3dXp5FzuxDrWkJuAq4L1AB7BM0sKIWFNR7TJgeUScnp67uAp4d9p3JXBbRJwhaTSwZ9Fzm5nZy9ffg3Kf6e+NEfG1AY49FVgfERvS8W4ETiNrgfSaDFyRjrdW0gRJBwLdZEuc/knatwXYMsD5zMxsEPXXxbR3+mklW5O6Jf1cQPbBPpAW4NGK7Y5UVukBYAaApKlka02MJ2updAH/Kqld0tWS9so7iaTzJLVJauvq6ioQlpmZFdFngoiIL0XEl4D9gTdFxF9GxF8Cbyb7EB+I8g5btT0bGCtpOXAR0A5sI2vZvAn4VkRMAZ
4FXjKGkeKcFxGtEdE6bty4AmGZmVkRRR6UO5Qdu3e2UOwupg7gkIrt8cDGygoRsZm0Op0kAQ+lnz2Bjoi4L1W9hT4ShJmZlaNIgvgesFTSfLIWwOnAtQXetwyYmKYG7wTOBM6urCCpGXgujTGcAyxJSWOzpEclTYqIdWQD12swM7MhU+Qupssl/QR4eyqaGRHtBd63TdKFwGKgCbgmIlZLuiDtnwscAVwrqYcsAXyy4hAXAdelO5g24HWwzcyGVH93Me0TEZsl7Qc8nH569+0XEU8NdPCIWAQsqiqbW/H6HmBiH+9dTjZAbmZmNdBfC+J6sum+72fHwWWl7cLPRJiZ2cjT33Tfp6bfXl7UzKwB9dfF9Kb+3hgRvxr8cMzMbLjor4vpq/3sC+DEQY7FzMyGkf66mE4YykDMzGx4KfIcBGmW1cnsuKJckWchzMxshBowQUj6AvAusgSxCDgZ+AXFHpYzM7MRqsh6EGeQPcn83xExE3gjsHupUZmZWc0VSRDdEbEd2CZpH+AJ/AyEmVndKzIG0ZbmTPo22UNzvwOWlhqVmZnVXH/PQXwTuD4i/iwVzZV0G7BPRKwYkujMzKxm+mtB/BfwVUkHATcBN6T5kczMrAH0t2DQlRFxHPBO4Cmy1d0elPR5Sa8bsgjNzKwmBhykjojfRMRX0spuZ5OtB/Fg6ZGZmVlNDZggJI2S9H5J1wE/Af4T+GDpkZmZWU31N0j9XuAs4H1kdy3dCJwXEc8OUWxmZlZD/Q1SX0a2JsRfFVkcyMzM6osn6zMzs1xFnqQ2M7MG5ARhZma5nCDMzCyXE4SZmeVygjAzs1ylJghJJ0laJ2m9pEty9o+VNF/SCklL08p1vfselrRS0nJJbWXGaWZmL1VoydFdIakJuAp4L9ABLJO0MCLWVFS7DFgeEadLen2q/+6K/SdExJNlxWhmZn0rswUxFVgfERsiYgvZk9inVdWZDNwBEBFrgQmSDiwxJjMzK6jMBNECPFqx3ZHKKj0AzACQNBU4DBif9gXwU0n3Szqvr5NIOk9Sm6S2rq6uQQvezKzRlZkglFMWVduzgbGSlgMXAe3AtrTv+Ih4E3Ay8ClJ78g7SUTMi4jWiGgdN27cIIVuZmaljUGQtRgOqdgeD2ysrBARm4GZAJIEPJR+iIiN6fcTkuaTdVktKTFeMzOrUGYLYhkwUdLhkkYDZwILKytIak77AM4BlkTEZkl7Sdo71dkL+CNgVYmxmplZldJaEBGxTdKFwGKgCbgmIlZLuiDtnwscAVwrqQdYA3wyvf1AYH7WqOAVZGtj31ZWrGZm9lJldjEREYuARVVlcyte3wNMzHnfBuCNZcZmZmb985PUZmaWywnCzMxyOUGYmVkuJwgzM8vlBGFmZrmcIMzMLJcThJmZ5XKCMDOzXE4QZmaWywnCzMxyOUGYmVkuJwgzM8vlBGFmZrmcIMzMLJcThJmZ5XKCMDOzXE4QZmaWywnCzMxyOUGYmVkuJwgzM8vlBGFmZrmcIMzMLJcThJmZ5So1QUg6SdI6SeslXZKzf6yk+ZJWSFoq6ciq/U2S2iX9uMw4zczspUpLEJKagKuAk4HJwFmSJldVuwxYHhFHAx8DrqzafzHwYFkxmplZ38psQUwF1kfEhojYAtwInFZVZzJwB0BErAUmSDoQQNJ44H3A1SXGaGZmfSgzQbQAj1Zsd6SySg8AMwAkTQUOA8anfV8HPgts7+8kks6T1CapraurazDiNjMzyk0QyimLqu3ZwFhJy4GLgHZgm6RTgSci4v6BThIR8yKiNSJax40b97KDNjOzzCtKPHYHcEjF9nhgY2WFiNgMzASQJOCh9HMm8AFJpwB7APtI+reI+EiJ8ZqZWYUyWxDLgImSDpc0muxDf2FlBUnNaR/AOcCSiNgcEZdGxPiImJDed6eTg5nZ0CotQUTENuBCYDHZnUjfj4jVki6QdEGqdgSwWtJasrudLi4rnr4saO+k/ZFN3PfQUxw/+04WtHcOdQhmZsOSIqqHBUau1tbWaGtrK1x/QXsnl966ku6tPS+UjRnVxBUzjmL6lOrxdDOz+iPp/ohozdvX0E9Sz1m8bofkANC9tYc5i9fVKCIzs+GjoRPExk3dO1VuZtZIGjpBHNw8ZqfKzcwaSUMniFnTJjFmVNMOZWNGNTFr2qQaRWRmNnyU+RzEsNc7ED1n8To2burm4OYxzJo2yQPUZmY0eIKALEk4IZiZvVRDdzGZmVnfnCDMzCyXE4SZmeVygjAzs1xOEGZmlquu5mKS1AX8pmD1/YEnSwyn1nx9I5uvb2QbSdd3WETkLqZTVwliZ0hq62uCqnrg6xvZfH0jW71cn7uYzMwslxOEmZnlauQEMa/WAZTM1zey+fpGtrq4voYdgzAzs/41cgvCzMz64QRhZma5Gi5BSDpJ0jpJ6yVdUut4BoOkayQ9IWlVRdl+km6X9F/p99haxrirJB0i6WeSHpS0WtLFqbwurg9A0h6Slkp6IF3jl1J5PV1jk6R2ST9O23VzbQCSHpa0UtJySW2pbMRfY0MlCElNwFXAycBk4CxJk2sb1aD4DnBSVdklwB0RMRG4I22PRNuAv4yII4C3AJ9Kf7N6uT6A54ETI+KNwDHASZLeQn1d48XAgxXb9XRtvU6IiGMqnn8Y8dfYUAkCmAqsj4gNEbEFuBE4rcYxvWwRsQR4qqr4NOC76fV3gelDGtQgiYjHIuJX6fUzZB8yLdTJ9QFE5ndpc1T6CerkGiWNB94HXF1RXBfXNoARf42NliBagEcrtjtSWT06MCIeg+xDFjigxvG8bJImAFOA+6iz60tdMMuBJ4DbI6KervHrwGeB7RVl9XJtvQL4qaT7JZ2Xykb8NTbainLKKfN9viOApFcCPwD+IiI2S3l/ypErInqAYyQ1A/MlHVnrmAaDpFOBJyLifknvqnU8JTo+IjZKOgC4XdLaWgc0GBqtBdEBHFKxPR7YWKNYyva4pIMA0u8nahzPLpM0iiw5XBcRt6biurm+ShGxCfg52ZhSPVzj8cAHJD1M1qV7oqR/oz6u7QURsTH9fgKYT9adPeKvsdESxDJgoqTDJY0GzgQW1jimsiwEPp5efxz4YQ1j2WXKmgr/AjwYEV+r2FUX1wcgaVxqOSBpDPAeYC11cI0RcWlEjI+ICWT/3u6MiI9QB9fWS9JekvbufQ38EbCKOrjGhnuSWtIpZH2iTcA1EXF5jUN62STdALyLbIrhx4EvAAuA7wOHAo8AfxwR1QPZw56ktwH/AazkxT7sy8jGIUb89QFIOppsELOJ7Evb9yPi7yS9ijq5RoDUxfRXEXFqPV2bpFeTtRog67a/PiIur4drbLgEYWZmxTRaF5OZmRXkBGFmZrmcIMzMLJcThJmZ5XKCMDOzXE4QNmJI+idJf1GxvVjS1RXbX5X0mX7e/x1JZ6TXP5f0kkXlJY2SNDvNwLkqzbJ6ctr3sKT9dyHuF87bx/6r0iygayR1p9fLJZ0haVHvMxKDSdJBvTOr9rF/tKQlkhpttgWr4ARhI8kvgbcCSNqN7LmPN1Tsfytw98s8x
5eBg4AjI+JI4P3A3i/zmP2KiE9FxDHAKcCv04ygx0TELRFxSnq6erB9Bvh2PzFtIZuB9MMlnNtGCCcIG0nuJiUIssSwCnhG0lhJuwNHAO2SPi9pWWoBzFPBiZsk7QmcC1wUEc8DRMTjEfH9nLqfScdfVdWq+ZikFWlth+/lvO/LqUVR6N9eb6tF0gRJayVdnc55naT3SLo7tXampvp7KVsfZJmy9Rf6mq34g8Bt6T1vSC2l5Sn2ianOAuB/FYnT6pObjzZipMnQtkk6lCxR3EM2G+9xwNPAiojYIumbEfF3AOlD+lTgRwVO8VrgkYjY3F8lSW8GZgLHkk0AeZ+ku4AtwN+QTdz2pKT9qt73D8C+wMzYtSdUXwv8MXAe2bQxZwNvAz5A9nT59HT+OyPiE6lraqmkf4+IZyviOBz4bW8SBC4AroyI69IUNE2pfBXwh7sQp9UJtyBspOltRfQmiBJxjigAAAIXSURBVHsqtn+Z6pwg6T5JK4ET2bEbajC8DZgfEc+mdRxuBd6eznVLRDwJUDWtwueA5og4fxeTA8BDEbEyIrYDq8kWowmyaUgmpDp/BFyibOrwnwN7kE31UOkgoKti+x7gMkl/DRwWEd0p/h5gS+88Q9Z4nCBspOkdhziK7BvuvWQtiLcCd0vaA/hn4IyIOIqsn32PgsdeDxxa4AOxry4r0ff08cuAN1e3KnbS8xWvt1dsb+fF3gABH6wYxzg0IipXcgPopuK/SURcT9YK6QYWSzqxou7uwO9fRsw2gjlB2EhzN1mX0VMR0ZO+pTeTJYl7ePGD70lla0j0efdQtYh4jmzm2G+krpbeu30+UlV1CTBd0p5p9s7TySYUvAP4UJqkjapkcBswG/h/JX8jXwxc1DvuImlKTp3/5MUWR+9kcxsi4htkM5AencpfBXRFxNYS47VhzAnCRpqVZHcv3VtV9nREPJnu+Pl2KltA9s19Z/wtWffLGkmr0jEqu2NIS6B+B1hKNqvs1RHRHhGrgcuBuyQ9AHyt6n03p9gWpmm9y/BlsiVLV6T4v1xdIY1H/FrSa1PRh4FVqVvq9cC1qfwEYFFJcdoI4NlczRqQpNOBN0fE3/ZT51bg0ohYN3SR2XDiu5jMGlBEzO/tCsuTutgWODk0NrcgzMwsl8cgzMwslxOEmZnlcoIwM7NcThBmZpbLCcLMzHL9f2YCmplXeUu2AAAAAElFTkSuQmCC",
"text/plain": [
"
"
+ ],
+ "text/plain": [
+ " timeStamp demand precip temp_above_monthly_avg\n",
+ "0 2012-01-01 4954.833333 0.002487 1\n",
+ "1 2012-01-02 5302.954167 0.000000 1\n",
+ "2 2012-01-03 6095.512500 0.000000 0\n",
+ "3 2012-01-04 6336.266667 0.000000 0\n",
+ "4 2012-01-05 6130.245833 0.000000 1\n",
+ "... ... ... ... ...\n",
+ "1864 2017-02-07 5861.319833 0.011938 1\n",
+ "1865 2017-02-08 5667.644708 0.001258 1\n",
+ "1866 2017-02-09 5947.661958 0.027029 0\n",
+ "1867 2017-02-10 6195.122500 0.000179 0\n",
+ "1868 2017-02-11 5461.026000 0.000492 1\n",
+ "\n",
+ "[1869 rows x 4 columns]"
+ ]
+ },
+ "execution_count": 18,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
"source": [
"# split data into train and test\n",
"num_samples = multi_df.shape[0]\n",
@@ -1687,7 +1545,9 @@
"multi_X_test = multi_test_df[\n",
" [\"timeStamp\", \"precip\", \"temp_above_monthly_avg\"]\n",
"] # test dataframe must contain values for the regressors / multivariate variables\n",
- "multi_y_test = multi_test_df[\"demand\"]"
+ "multi_y_test = multi_test_df[\"demand\"]\n",
+ "\n",
+ "multi_train_df"
]
},
{
@@ -1699,141 +1559,111 @@
},
{
"cell_type": "code",
- "execution_count": 18,
+ "execution_count": 19,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
- "[flaml.automl: 02-28 21:32:20] {2060} INFO - task = ts_forecast\n",
- "[flaml.automl: 02-28 21:32:20] {2062} INFO - Data split method: time\n",
- "[flaml.automl: 02-28 21:32:20] {2066} INFO - Evaluation method: holdout\n",
- "[flaml.automl: 02-28 21:32:20] {2147} INFO - Minimizing error metric: mape\n",
- "[flaml.automl: 02-28 21:32:20] {2205} INFO - List of ML learners in AutoML Run: ['lgbm', 'rf', 'xgboost', 'extra_tree', 'xgb_limitdepth', 'prophet', 'arima', 'sarimax']\n",
- "[flaml.automl: 02-28 21:32:20] {2458} INFO - iteration 0, current learner lgbm\n",
- "[flaml.automl: 02-28 21:32:20] {2573} INFO - Estimated sufficient time budget=269s. Estimated necessary time budget=0s.\n",
- "[flaml.automl: 02-28 21:32:20] {2620} INFO - at 0.1s,\testimator lgbm's best error=0.1103,\tbest estimator lgbm's best error=0.1103\n",
- "[flaml.automl: 02-28 21:32:20] {2458} INFO - iteration 1, current learner lgbm\n",
- "[flaml.automl: 02-28 21:32:20] {2620} INFO - at 0.1s,\testimator lgbm's best error=0.1103,\tbest estimator lgbm's best error=0.1103\n",
- "[flaml.automl: 02-28 21:32:20] {2458} INFO - iteration 2, current learner lgbm\n",
- "[flaml.automl: 02-28 21:32:20] {2620} INFO - at 0.1s,\testimator lgbm's best error=0.0983,\tbest estimator lgbm's best error=0.0983\n",
- "[flaml.automl: 02-28 21:32:20] {2458} INFO - iteration 3, current learner rf\n",
- "[flaml.automl: 02-28 21:32:20] {2620} INFO - at 0.1s,\testimator rf's best error=0.0972,\tbest estimator rf's best error=0.0972\n",
- "[flaml.automl: 02-28 21:32:20] {2458} INFO - iteration 4, current learner xgboost\n",
- "[flaml.automl: 02-28 21:32:20] {2620} INFO - at 0.2s,\testimator xgboost's best error=0.6523,\tbest estimator rf's best error=0.0972\n",
- "[flaml.automl: 02-28 21:32:20] {2458} INFO - iteration 5, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:32:20] {2620} INFO - at 0.2s,\testimator extra_tree's best error=0.1073,\tbest estimator rf's best error=0.0972\n",
- "[flaml.automl: 02-28 21:32:20] {2458} INFO - iteration 6, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:32:20] {2620} INFO - at 0.2s,\testimator xgb_limitdepth's best error=0.0820,\tbest estimator xgb_limitdepth's best error=0.0820\n",
- "[flaml.automl: 02-28 21:32:20] {2458} INFO - iteration 7, current learner prophet\n",
- "[flaml.automl: 02-28 21:32:24] {2620} INFO - at 4.4s,\testimator prophet's best error=0.0592,\tbest estimator prophet's best error=0.0592\n",
- "[flaml.automl: 02-28 21:32:24] {2458} INFO - iteration 8, current learner arima\n",
- "[flaml.automl: 02-28 21:32:25] {2620} INFO - at 5.1s,\testimator arima's best error=0.6434,\tbest estimator prophet's best error=0.0592\n",
- "[flaml.automl: 02-28 21:32:25] {2458} INFO - iteration 9, current learner sarimax\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "2016-08-16 00:00:00 2017-02-11 00:00:00 (180, 2)\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "[flaml.automl: 02-28 21:32:26] {2620} INFO - at 6.0s,\testimator sarimax's best error=0.6434,\tbest estimator prophet's best error=0.0592\n",
- "[flaml.automl: 02-28 21:32:26] {2458} INFO - iteration 10, current learner lgbm\n",
- "[flaml.automl: 02-28 21:32:26] {2620} INFO - at 6.0s,\testimator lgbm's best error=0.0983,\tbest estimator prophet's best error=0.0592\n",
- "[flaml.automl: 02-28 21:32:26] {2458} INFO - iteration 11, current learner xgboost\n",
- "[flaml.automl: 02-28 21:32:26] {2620} INFO - at 6.0s,\testimator xgboost's best error=0.6523,\tbest estimator prophet's best error=0.0592\n",
- "[flaml.automl: 02-28 21:32:26] {2458} INFO - iteration 12, current learner rf\n",
- "[flaml.automl: 02-28 21:32:26] {2620} INFO - at 6.1s,\testimator rf's best error=0.0862,\tbest estimator prophet's best error=0.0592\n",
- "[flaml.automl: 02-28 21:32:26] {2458} INFO - iteration 13, current learner xgboost\n",
- "[flaml.automl: 02-28 21:32:26] {2620} INFO - at 6.1s,\testimator xgboost's best error=0.2637,\tbest estimator prophet's best error=0.0592\n",
- "[flaml.automl: 02-28 21:32:26] {2458} INFO - iteration 14, current learner xgboost\n",
- "[flaml.automl: 02-28 21:32:26] {2620} INFO - at 6.1s,\testimator xgboost's best error=0.0959,\tbest estimator prophet's best error=0.0592\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "2016-08-16 00:00:00 2017-02-11 00:00:00 (180, 2)\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "[flaml.automl: 02-28 21:32:26] {2458} INFO - iteration 15, current learner xgboost\n",
- "[flaml.automl: 02-28 21:32:26] {2620} INFO - at 6.2s,\testimator xgboost's best error=0.0959,\tbest estimator prophet's best error=0.0592\n",
- "[flaml.automl: 02-28 21:32:26] {2458} INFO - iteration 16, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:32:26] {2620} INFO - at 6.2s,\testimator extra_tree's best error=0.0961,\tbest estimator prophet's best error=0.0592\n",
- "[flaml.automl: 02-28 21:32:26] {2458} INFO - iteration 17, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:32:26] {2620} INFO - at 6.2s,\testimator extra_tree's best error=0.0961,\tbest estimator prophet's best error=0.0592\n",
- "[flaml.automl: 02-28 21:32:26] {2458} INFO - iteration 18, current learner xgboost\n",
- "[flaml.automl: 02-28 21:32:26] {2620} INFO - at 6.2s,\testimator xgboost's best error=0.0959,\tbest estimator prophet's best error=0.0592\n",
- "[flaml.automl: 02-28 21:32:26] {2458} INFO - iteration 19, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:32:26] {2620} INFO - at 6.3s,\testimator xgb_limitdepth's best error=0.0820,\tbest estimator prophet's best error=0.0592\n",
- "[flaml.automl: 02-28 21:32:26] {2458} INFO - iteration 20, current learner xgboost\n",
- "[flaml.automl: 02-28 21:32:26] {2620} INFO - at 6.3s,\testimator xgboost's best error=0.0834,\tbest estimator prophet's best error=0.0592\n",
- "[flaml.automl: 02-28 21:32:26] {2458} INFO - iteration 21, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:32:26] {2620} INFO - at 6.4s,\testimator xgb_limitdepth's best error=0.0820,\tbest estimator prophet's best error=0.0592\n",
- "[flaml.automl: 02-28 21:32:26] {2458} INFO - iteration 22, current learner lgbm\n",
- "[flaml.automl: 02-28 21:32:26] {2620} INFO - at 6.4s,\testimator lgbm's best error=0.0925,\tbest estimator prophet's best error=0.0592\n",
- "[flaml.automl: 02-28 21:32:26] {2458} INFO - iteration 23, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:32:26] {2620} INFO - at 6.4s,\testimator xgb_limitdepth's best error=0.0820,\tbest estimator prophet's best error=0.0592\n",
- "[flaml.automl: 02-28 21:32:26] {2458} INFO - iteration 24, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:32:26] {2620} INFO - at 6.5s,\testimator extra_tree's best error=0.0922,\tbest estimator prophet's best error=0.0592\n",
- "[flaml.automl: 02-28 21:32:26] {2458} INFO - iteration 25, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:32:26] {2620} INFO - at 6.5s,\testimator xgb_limitdepth's best error=0.0820,\tbest estimator prophet's best error=0.0592\n",
- "[flaml.automl: 02-28 21:32:26] {2458} INFO - iteration 26, current learner rf\n",
- "[flaml.automl: 02-28 21:32:26] {2620} INFO - at 6.5s,\testimator rf's best error=0.0862,\tbest estimator prophet's best error=0.0592\n",
- "[flaml.automl: 02-28 21:32:26] {2458} INFO - iteration 27, current learner rf\n",
- "[flaml.automl: 02-28 21:32:26] {2620} INFO - at 6.6s,\testimator rf's best error=0.0856,\tbest estimator prophet's best error=0.0592\n",
- "[flaml.automl: 02-28 21:32:26] {2458} INFO - iteration 28, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:32:26] {2620} INFO - at 6.6s,\testimator xgb_limitdepth's best error=0.0820,\tbest estimator prophet's best error=0.0592\n",
- "[flaml.automl: 02-28 21:32:27] {2458} INFO - iteration 29, current learner sarimax\n",
- "[flaml.automl: 02-28 21:32:28] {2620} INFO - at 7.9s,\testimator sarimax's best error=0.5313,\tbest estimator prophet's best error=0.0592\n",
- "[flaml.automl: 02-28 21:32:28] {2458} INFO - iteration 30, current learner xgboost\n",
- "[flaml.automl: 02-28 21:32:28] {2620} INFO - at 8.0s,\testimator xgboost's best error=0.0834,\tbest estimator prophet's best error=0.0592\n",
- "[flaml.automl: 02-28 21:32:28] {2458} INFO - iteration 31, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:32:28] {2620} INFO - at 8.0s,\testimator xgb_limitdepth's best error=0.0791,\tbest estimator prophet's best error=0.0592\n",
- "[flaml.automl: 02-28 21:32:28] {2458} INFO - iteration 32, current learner arima\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "2016-08-16 00:00:00 2017-02-11 00:00:00 (180, 2)\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "[flaml.automl: 02-28 21:32:30] {2620} INFO - at 10.3s,\testimator arima's best error=0.5998,\tbest estimator prophet's best error=0.0592\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "2016-08-16 00:00:00 2017-02-11 00:00:00 (180, 2)\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "[flaml.automl: 02-28 21:32:32] {2850} INFO - retrain prophet for 2.2s\n",
- "[flaml.automl: 02-28 21:32:32] {2857} INFO - retrained model: \n",
- "[flaml.automl: 02-28 21:32:32] {2234} INFO - fit succeeded\n",
- "[flaml.automl: 02-28 21:32:32] {2235} INFO - Time taken to find the best model: 4.351356506347656\n"
+ "[flaml.automl: 07-28 21:14:47] {2478} INFO - task = ts_forecast\n",
+ "[flaml.automl: 07-28 21:14:47] {2480} INFO - Data split method: time\n",
+ "[flaml.automl: 07-28 21:14:47] {2483} INFO - Evaluation method: holdout\n",
+ "[flaml.automl: 07-28 21:14:47] {2552} INFO - Minimizing error metric: mape\n",
+ "[flaml.automl: 07-28 21:14:47] {2694} INFO - List of ML learners in AutoML Run: ['lgbm', 'rf', 'xgboost', 'extra_tree', 'xgb_limitdepth', 'prophet', 'arima', 'sarimax']\n",
+ "[flaml.automl: 07-28 21:14:47] {2986} INFO - iteration 0, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:14:47] {3114} INFO - Estimated sufficient time budget=509s. Estimated necessary time budget=1s.\n",
+ "[flaml.automl: 07-28 21:14:47] {3161} INFO - at 0.1s,\testimator lgbm's best error=0.1103,\tbest estimator lgbm's best error=0.1103\n",
+ "[flaml.automl: 07-28 21:14:47] {2986} INFO - iteration 1, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:14:47] {3161} INFO - at 0.1s,\testimator lgbm's best error=0.1103,\tbest estimator lgbm's best error=0.1103\n",
+ "[flaml.automl: 07-28 21:14:47] {2986} INFO - iteration 2, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:14:47] {3161} INFO - at 0.2s,\testimator lgbm's best error=0.0983,\tbest estimator lgbm's best error=0.0983\n",
+ "[flaml.automl: 07-28 21:14:47] {2986} INFO - iteration 3, current learner rf\n",
+ "[flaml.automl: 07-28 21:14:47] {3161} INFO - at 0.2s,\testimator rf's best error=0.0968,\tbest estimator rf's best error=0.0968\n",
+ "[flaml.automl: 07-28 21:14:47] {2986} INFO - iteration 4, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:14:47] {3161} INFO - at 0.2s,\testimator lgbm's best error=0.0983,\tbest estimator rf's best error=0.0968\n",
+ "[flaml.automl: 07-28 21:14:47] {2986} INFO - iteration 5, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:14:47] {3161} INFO - at 0.3s,\testimator lgbm's best error=0.0925,\tbest estimator lgbm's best error=0.0925\n",
+ "[flaml.automl: 07-28 21:14:47] {2986} INFO - iteration 6, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:14:47] {3161} INFO - at 0.3s,\testimator lgbm's best error=0.0925,\tbest estimator lgbm's best error=0.0925\n",
+ "[flaml.automl: 07-28 21:14:47] {2986} INFO - iteration 7, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:14:47] {3161} INFO - at 0.3s,\testimator lgbm's best error=0.0925,\tbest estimator lgbm's best error=0.0925\n",
+ "[flaml.automl: 07-28 21:14:47] {2986} INFO - iteration 8, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:14:47] {3161} INFO - at 0.4s,\testimator lgbm's best error=0.0861,\tbest estimator lgbm's best error=0.0861\n",
+ "[flaml.automl: 07-28 21:14:47] {2986} INFO - iteration 9, current learner rf\n",
+ "[flaml.automl: 07-28 21:14:47] {3161} INFO - at 0.4s,\testimator rf's best error=0.0877,\tbest estimator lgbm's best error=0.0861\n",
+ "[flaml.automl: 07-28 21:14:47] {2986} INFO - iteration 10, current learner rf\n",
+ "[flaml.automl: 07-28 21:14:48] {3161} INFO - at 0.4s,\testimator rf's best error=0.0877,\tbest estimator lgbm's best error=0.0861\n",
+ "[flaml.automl: 07-28 21:14:48] {2986} INFO - iteration 11, current learner rf\n",
+ "[flaml.automl: 07-28 21:14:48] {3161} INFO - at 0.5s,\testimator rf's best error=0.0877,\tbest estimator lgbm's best error=0.0861\n",
+ "[flaml.automl: 07-28 21:14:48] {2986} INFO - iteration 12, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:14:48] {3161} INFO - at 0.5s,\testimator xgboost's best error=0.6523,\tbest estimator lgbm's best error=0.0861\n",
+ "[flaml.automl: 07-28 21:14:48] {2986} INFO - iteration 13, current learner rf\n",
+ "[flaml.automl: 07-28 21:14:48] {3161} INFO - at 0.6s,\testimator rf's best error=0.0836,\tbest estimator rf's best error=0.0836\n",
+ "[flaml.automl: 07-28 21:14:48] {2986} INFO - iteration 14, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:14:48] {3161} INFO - at 0.6s,\testimator xgboost's best error=0.6523,\tbest estimator rf's best error=0.0836\n",
+ "[flaml.automl: 07-28 21:14:48] {2986} INFO - iteration 15, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:14:48] {3161} INFO - at 0.6s,\testimator extra_tree's best error=0.1059,\tbest estimator rf's best error=0.0836\n",
+ "[flaml.automl: 07-28 21:14:48] {2986} INFO - iteration 16, current learner rf\n",
+ "[flaml.automl: 07-28 21:14:48] {3161} INFO - at 0.7s,\testimator rf's best error=0.0743,\tbest estimator rf's best error=0.0743\n",
+ "[flaml.automl: 07-28 21:14:48] {2986} INFO - iteration 17, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:14:48] {3161} INFO - at 0.8s,\testimator extra_tree's best error=0.0962,\tbest estimator rf's best error=0.0743\n",
+ "[flaml.automl: 07-28 21:14:48] {2986} INFO - iteration 18, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:14:48] {3161} INFO - at 0.8s,\testimator xgb_limitdepth's best error=0.0820,\tbest estimator rf's best error=0.0743\n",
+ "[flaml.automl: 07-28 21:14:48] {2986} INFO - iteration 19, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:14:48] {3161} INFO - at 0.8s,\testimator lgbm's best error=0.0861,\tbest estimator rf's best error=0.0743\n",
+ "[flaml.automl: 07-28 21:14:48] {2986} INFO - iteration 20, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:14:48] {3161} INFO - at 0.9s,\testimator xgb_limitdepth's best error=0.0820,\tbest estimator rf's best error=0.0743\n",
+ "[flaml.automl: 07-28 21:14:48] {2986} INFO - iteration 21, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:14:48] {3161} INFO - at 0.9s,\testimator xgboost's best error=0.2637,\tbest estimator rf's best error=0.0743\n",
+ "[flaml.automl: 07-28 21:14:48] {2986} INFO - iteration 22, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:14:48] {3161} INFO - at 0.9s,\testimator xgboost's best error=0.0959,\tbest estimator rf's best error=0.0743\n",
+ "[flaml.automl: 07-28 21:14:48] {2986} INFO - iteration 23, current learner rf\n",
+ "[flaml.automl: 07-28 21:14:48] {3161} INFO - at 1.0s,\testimator rf's best error=0.0743,\tbest estimator rf's best error=0.0743\n",
+ "[flaml.automl: 07-28 21:14:48] {2986} INFO - iteration 24, current learner rf\n",
+ "[flaml.automl: 07-28 21:14:48] {3161} INFO - at 1.1s,\testimator rf's best error=0.0743,\tbest estimator rf's best error=0.0743\n",
+ "[flaml.automl: 07-28 21:14:48] {2986} INFO - iteration 25, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:14:48] {3161} INFO - at 1.1s,\testimator xgb_limitdepth's best error=0.0820,\tbest estimator rf's best error=0.0743\n",
+ "[flaml.automl: 07-28 21:14:48] {2986} INFO - iteration 26, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:14:48] {3161} INFO - at 1.2s,\testimator xgb_limitdepth's best error=0.0820,\tbest estimator rf's best error=0.0743\n",
+ "[flaml.automl: 07-28 21:14:48] {2986} INFO - iteration 27, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:14:48] {3161} INFO - at 1.2s,\testimator xgboost's best error=0.0959,\tbest estimator rf's best error=0.0743\n",
+ "[flaml.automl: 07-28 21:14:48] {2986} INFO - iteration 28, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:14:48] {3161} INFO - at 1.2s,\testimator xgboost's best error=0.0959,\tbest estimator rf's best error=0.0743\n",
+ "[flaml.automl: 07-28 21:14:48] {2986} INFO - iteration 29, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:14:48] {3161} INFO - at 1.2s,\testimator xgb_limitdepth's best error=0.0820,\tbest estimator rf's best error=0.0743\n",
+ "[flaml.automl: 07-28 21:14:48] {2986} INFO - iteration 30, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:14:48] {3161} INFO - at 1.2s,\testimator xgb_limitdepth's best error=0.0820,\tbest estimator rf's best error=0.0743\n",
+ "[flaml.automl: 07-28 21:14:48] {2986} INFO - iteration 31, current learner rf\n",
+ "[flaml.automl: 07-28 21:14:48] {3161} INFO - at 1.3s,\testimator rf's best error=0.0743,\tbest estimator rf's best error=0.0743\n",
+ "[flaml.automl: 07-28 21:14:48] {2986} INFO - iteration 32, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:14:48] {3161} INFO - at 1.3s,\testimator lgbm's best error=0.0861,\tbest estimator rf's best error=0.0743\n",
+ "[flaml.automl: 07-28 21:14:48] {2986} INFO - iteration 33, current learner rf\n",
+ "[flaml.automl: 07-28 21:14:48] {3161} INFO - at 1.4s,\testimator rf's best error=0.0743,\tbest estimator rf's best error=0.0743\n",
+ "[flaml.automl: 07-28 21:14:48] {2986} INFO - iteration 34, current learner xgb_limitdepth\n",
+ "[flaml.automl: 07-28 21:14:49] {3161} INFO - at 1.4s,\testimator xgb_limitdepth's best error=0.0791,\tbest estimator rf's best error=0.0743\n",
+ "[flaml.automl: 07-28 21:14:49] {2986} INFO - iteration 35, current learner rf\n",
+ "[flaml.automl: 07-28 21:14:49] {3161} INFO - at 1.5s,\testimator rf's best error=0.0735,\tbest estimator rf's best error=0.0735\n",
+ "[flaml.automl: 07-28 21:14:49] {2986} INFO - iteration 36, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:14:49] {3161} INFO - at 1.6s,\testimator xgboost's best error=0.0834,\tbest estimator rf's best error=0.0735\n",
+ "[flaml.automl: 07-28 21:14:49] {2986} INFO - iteration 37, current learner prophet\n",
+ "[flaml.automl: 07-28 21:14:53] {3161} INFO - at 6.0s,\testimator prophet's best error=0.0592,\tbest estimator prophet's best error=0.0592\n",
+ "[flaml.automl: 07-28 21:14:53] {2986} INFO - iteration 38, current learner arima\n",
+ "[flaml.automl: 07-28 21:14:54] {3161} INFO - at 6.8s,\testimator arima's best error=0.6434,\tbest estimator prophet's best error=0.0592\n",
+ "[flaml.automl: 07-28 21:14:54] {2986} INFO - iteration 39, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:14:55] {3161} INFO - at 7.8s,\testimator sarimax's best error=0.6434,\tbest estimator prophet's best error=0.0592\n",
+ "[flaml.automl: 07-28 21:14:55] {2986} INFO - iteration 40, current learner sarimax\n",
+ "[flaml.automl: 07-28 21:14:57] {3161} INFO - at 9.8s,\testimator sarimax's best error=0.5313,\tbest estimator prophet's best error=0.0592\n",
+ "[flaml.automl: 07-28 21:14:57] {2986} INFO - iteration 41, current learner xgboost\n",
+ "[flaml.automl: 07-28 21:14:57] {3161} INFO - at 9.9s,\testimator xgboost's best error=0.0834,\tbest estimator prophet's best error=0.0592\n",
+ "[flaml.automl: 07-28 21:14:57] {2986} INFO - iteration 42, current learner extra_tree\n",
+ "[flaml.automl: 07-28 21:14:57] {3161} INFO - at 10.0s,\testimator extra_tree's best error=0.0962,\tbest estimator prophet's best error=0.0592\n",
+ "[flaml.automl: 07-28 21:14:57] {2986} INFO - iteration 43, current learner lgbm\n",
+ "[flaml.automl: 07-28 21:14:57] {3161} INFO - at 10.1s,\testimator lgbm's best error=0.0861,\tbest estimator prophet's best error=0.0592\n",
+ "[flaml.automl: 07-28 21:15:01] {3425} INFO - retrain prophet for 3.8s\n",
+ "[flaml.automl: 07-28 21:15:01] {3432} INFO - retrained model: \n",
+ "[flaml.automl: 07-28 21:15:01] {2725} INFO - fit succeeded\n",
+ "[flaml.automl: 07-28 21:15:01] {2726} INFO - Time taken to find the best model: 5.99089241027832\n"
]
}
],
@@ -1872,7 +1702,7 @@
},
{
"cell_type": "code",
- "execution_count": 19,
+ "execution_count": 20,
"metadata": {},
"outputs": [
{
@@ -1915,7 +1745,7 @@
},
{
"cell_type": "code",
- "execution_count": 20,
+ "execution_count": 21,
"metadata": {},
"outputs": [
{
@@ -1941,7 +1771,7 @@
},
{
"cell_type": "code",
- "execution_count": 21,
+ "execution_count": 22,
"metadata": {},
"outputs": [
{
@@ -1986,7 +1816,7 @@
},
{
"cell_type": "code",
- "execution_count": 50,
+ "execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
@@ -2009,6 +1839,171 @@
"discrete_y_train, discrete_y_test = discrete_train_df[\"above_mean_sales\"], discrete_test_df[\"above_mean_sales\"]"
]
},
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "
\n",
+ "\n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
\n",
+ "
Date
\n",
+ "
Sales
\n",
+ "
Open
\n",
+ "
Promo
\n",
+ "
Promo2
\n",
+ "
above_mean_sales
\n",
+ "
\n",
+ " \n",
+ " \n",
+ "
\n",
+ "
0
\n",
+ "
2015-02-02
\n",
+ "
24894
\n",
+ "
True
\n",
+ "
True
\n",
+ "
False
\n",
+ "
1
\n",
+ "
\n",
+ "
\n",
+ "
1
\n",
+ "
2015-02-03
\n",
+ "
22139
\n",
+ "
True
\n",
+ "
True
\n",
+ "
False
\n",
+ "
1
\n",
+ "
\n",
+ "
\n",
+ "
2
\n",
+ "
2015-02-04
\n",
+ "
20452
\n",
+ "
True
\n",
+ "
True
\n",
+ "
False
\n",
+ "
1
\n",
+ "
\n",
+ "
\n",
+ "
3
\n",
+ "
2015-02-05
\n",
+ "
20977
\n",
+ "
True
\n",
+ "
True
\n",
+ "
False
\n",
+ "
1
\n",
+ "
\n",
+ "
\n",
+ "
4
\n",
+ "
2015-02-06
\n",
+ "
19151
\n",
+ "
True
\n",
+ "
True
\n",
+ "
False
\n",
+ "
1
\n",
+ "
\n",
+ "
\n",
+ "
...
\n",
+ "
...
\n",
+ "
...
\n",
+ "
...
\n",
+ "
...
\n",
+ "
...
\n",
+ "
...
\n",
+ "
\n",
+ "
\n",
+ "
145
\n",
+ "
2015-06-27
\n",
+ "
13108
\n",
+ "
True
\n",
+ "
False
\n",
+ "
False
\n",
+ "
0
\n",
+ "
\n",
+ "
\n",
+ "
146
\n",
+ "
2015-06-28
\n",
+ "
0
\n",
+ "
False
\n",
+ "
False
\n",
+ "
False
\n",
+ "
0
\n",
+ "
\n",
+ "
\n",
+ "
147
\n",
+ "
2015-06-29
\n",
+ "
28456
\n",
+ "
True
\n",
+ "
True
\n",
+ "
False
\n",
+ "
1
\n",
+ "
\n",
+ "
\n",
+ "
148
\n",
+ "
2015-06-30
\n",
+ "
27140
\n",
+ "
True
\n",
+ "
True
\n",
+ "
False
\n",
+ "
1
\n",
+ "
\n",
+ "
\n",
+ "
149
\n",
+ "
2015-07-01
\n",
+ "
24957
\n",
+ "
True
\n",
+ "
True
\n",
+ "
False
\n",
+ "
1
\n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
150 rows × 6 columns
\n",
+ "
"
+ ],
+ "text/plain": [
+ " Date Sales Open Promo Promo2 above_mean_sales\n",
+ "0 2015-02-02 24894 True True False 1\n",
+ "1 2015-02-03 22139 True True False 1\n",
+ "2 2015-02-04 20452 True True False 1\n",
+ "3 2015-02-05 20977 True True False 1\n",
+ "4 2015-02-06 19151 True True False 1\n",
+ ".. ... ... ... ... ... ...\n",
+ "145 2015-06-27 13108 True False False 0\n",
+ "146 2015-06-28 0 False False False 0\n",
+ "147 2015-06-29 28456 True True False 1\n",
+ "148 2015-06-30 27140 True True False 1\n",
+ "149 2015-07-01 24957 True True False 1\n",
+ "\n",
+ "[150 rows x 6 columns]"
+ ]
+ },
+ "execution_count": 3,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "discrete_train_df"
+ ]
+ },
{
"cell_type": "markdown",
"metadata": {},
@@ -2018,7 +2013,7 @@
},
{
"cell_type": "code",
- "execution_count": 51,
+ "execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
@@ -2028,7 +2023,7 @@
},
{
"cell_type": "code",
- "execution_count": 52,
+ "execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
@@ -2043,890 +2038,486 @@
},
{
"cell_type": "code",
- "execution_count": 53,
+ "execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
- "[flaml.automl: 02-28 21:54:50] {2060} INFO - task = ts_forecast_classification\n",
- "[flaml.automl: 02-28 21:54:50] {2062} INFO - Data split method: time\n",
- "[flaml.automl: 02-28 21:54:50] {2066} INFO - Evaluation method: holdout\n",
- "[flaml.automl: 02-28 21:54:50] {2147} INFO - Minimizing error metric: 1-accuracy\n",
- "[flaml.automl: 02-28 21:54:50] {2205} INFO - List of ML learners in AutoML Run: ['lgbm', 'rf', 'xgboost', 'extra_tree', 'xgb_limitdepth']\n",
- "[flaml.automl: 02-28 21:54:50] {2458} INFO - iteration 0, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:50] {2573} INFO - Estimated sufficient time budget=249s. Estimated necessary time budget=0s.\n",
- "[flaml.automl: 02-28 21:54:50] {2620} INFO - at 0.0s,\testimator lgbm's best error=0.2667,\tbest estimator lgbm's best error=0.2667\n",
- "[flaml.automl: 02-28 21:54:50] {2458} INFO - iteration 1, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:50] {2620} INFO - at 0.1s,\testimator lgbm's best error=0.2667,\tbest estimator lgbm's best error=0.2667\n",
- "[flaml.automl: 02-28 21:54:50] {2458} INFO - iteration 2, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:50] {2620} INFO - at 0.1s,\testimator lgbm's best error=0.1333,\tbest estimator lgbm's best error=0.1333\n",
- "[flaml.automl: 02-28 21:54:50] {2458} INFO - iteration 3, current learner rf\n",
- "[flaml.automl: 02-28 21:54:50] {2620} INFO - at 0.1s,\testimator rf's best error=0.1333,\tbest estimator lgbm's best error=0.1333\n",
- "[flaml.automl: 02-28 21:54:50] {2458} INFO - iteration 4, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:50] {2620} INFO - at 0.2s,\testimator xgboost's best error=0.1333,\tbest estimator lgbm's best error=0.1333\n",
- "[flaml.automl: 02-28 21:54:50] {2458} INFO - iteration 5, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:50] {2620} INFO - at 0.2s,\testimator lgbm's best error=0.1333,\tbest estimator lgbm's best error=0.1333\n",
- "[flaml.automl: 02-28 21:54:50] {2458} INFO - iteration 6, current learner rf\n",
- "[flaml.automl: 02-28 21:54:50] {2620} INFO - at 0.2s,\testimator rf's best error=0.0667,\tbest estimator rf's best error=0.0667\n",
- "[flaml.automl: 02-28 21:54:50] {2458} INFO - iteration 7, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:50] {2620} INFO - at 0.3s,\testimator lgbm's best error=0.0667,\tbest estimator rf's best error=0.0667\n",
- "[flaml.automl: 02-28 21:54:50] {2458} INFO - iteration 8, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:50] {2620} INFO - at 0.3s,\testimator lgbm's best error=0.0667,\tbest estimator rf's best error=0.0667\n",
- "[flaml.automl: 02-28 21:54:50] {2458} INFO - iteration 9, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:50] {2620} INFO - at 0.3s,\testimator lgbm's best error=0.0667,\tbest estimator rf's best error=0.0667\n",
- "[flaml.automl: 02-28 21:54:50] {2458} INFO - iteration 10, current learner rf\n",
- "[flaml.automl: 02-28 21:54:50] {2620} INFO - at 0.4s,\testimator rf's best error=0.0667,\tbest estimator rf's best error=0.0667\n",
- "[flaml.automl: 02-28 21:54:50] {2458} INFO - iteration 11, current learner rf\n",
- "[flaml.automl: 02-28 21:54:50] {2620} INFO - at 0.4s,\testimator rf's best error=0.0667,\tbest estimator rf's best error=0.0667\n",
- "[flaml.automl: 02-28 21:54:50] {2458} INFO - iteration 12, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:50] {2620} INFO - at 0.4s,\testimator xgboost's best error=0.1333,\tbest estimator rf's best error=0.0667\n",
- "[flaml.automl: 02-28 21:54:50] {2458} INFO - iteration 13, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:50] {2620} INFO - at 0.5s,\testimator extra_tree's best error=0.1333,\tbest estimator rf's best error=0.0667\n",
- "[flaml.automl: 02-28 21:54:50] {2458} INFO - iteration 14, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:50] {2620} INFO - at 0.5s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator rf's best error=0.0667\n",
- "[flaml.automl: 02-28 21:54:50] {2458} INFO - iteration 15, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:50] {2620} INFO - at 0.5s,\testimator xgboost's best error=0.0667,\tbest estimator rf's best error=0.0667\n",
- "[flaml.automl: 02-28 21:54:50] {2458} INFO - iteration 16, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:50] {2620} INFO - at 0.6s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator rf's best error=0.0667\n",
- "[flaml.automl: 02-28 21:54:50] {2458} INFO - iteration 17, current learner rf\n",
- "[flaml.automl: 02-28 21:54:50] {2620} INFO - at 0.6s,\testimator rf's best error=0.0667,\tbest estimator rf's best error=0.0667\n",
- "[flaml.automl: 02-28 21:54:50] {2458} INFO - iteration 18, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:51] {2620} INFO - at 0.6s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator rf's best error=0.0667\n",
- "[flaml.automl: 02-28 21:54:51] {2458} INFO - iteration 19, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:51] {2620} INFO - at 0.7s,\testimator lgbm's best error=0.0667,\tbest estimator rf's best error=0.0667\n",
- "[flaml.automl: 02-28 21:54:51] {2458} INFO - iteration 20, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:51] {2620} INFO - at 0.7s,\testimator extra_tree's best error=0.0667,\tbest estimator rf's best error=0.0667\n",
- "[flaml.automl: 02-28 21:54:51] {2458} INFO - iteration 21, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:51] {2620} INFO - at 0.8s,\testimator xgboost's best error=0.0667,\tbest estimator rf's best error=0.0667\n",
- "[flaml.automl: 02-28 21:54:51] {2458} INFO - iteration 22, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:51] {2620} INFO - at 0.8s,\testimator extra_tree's best error=0.0667,\tbest estimator rf's best error=0.0667\n",
- "[flaml.automl: 02-28 21:54:51] {2458} INFO - iteration 23, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:51] {2620} INFO - at 0.8s,\testimator lgbm's best error=0.0667,\tbest estimator rf's best error=0.0667\n",
- "[flaml.automl: 02-28 21:54:51] {2458} INFO - iteration 24, current learner rf\n",
- "[flaml.automl: 02-28 21:54:51] {2620} INFO - at 0.9s,\testimator rf's best error=0.0667,\tbest estimator rf's best error=0.0667\n",
- "[flaml.automl: 02-28 21:54:51] {2458} INFO - iteration 25, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:51] {2620} INFO - at 0.9s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator rf's best error=0.0667\n",
- "[flaml.automl: 02-28 21:54:51] {2458} INFO - iteration 26, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:51] {2620} INFO - at 0.9s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator rf's best error=0.0667\n",
- "[flaml.automl: 02-28 21:54:51] {2458} INFO - iteration 27, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:51] {2620} INFO - at 1.0s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:51] {2458} INFO - iteration 28, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:51] {2620} INFO - at 1.0s,\testimator extra_tree's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:51] {2458} INFO - iteration 29, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:51] {2620} INFO - at 1.0s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:51] {2458} INFO - iteration 30, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:51] {2620} INFO - at 1.1s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:51] {2458} INFO - iteration 31, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:51] {2620} INFO - at 1.1s,\testimator lgbm's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:51] {2458} INFO - iteration 32, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:51] {2620} INFO - at 1.1s,\testimator lgbm's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:51] {2458} INFO - iteration 33, current learner rf\n",
- "[flaml.automl: 02-28 21:54:51] {2620} INFO - at 1.2s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:51] {2458} INFO - iteration 34, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:51] {2620} INFO - at 1.2s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:51] {2458} INFO - iteration 35, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:51] {2620} INFO - at 1.2s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:51] {2458} INFO - iteration 36, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:51] {2620} INFO - at 1.2s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:51] {2458} INFO - iteration 37, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:51] {2620} INFO - at 1.3s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:51] {2458} INFO - iteration 38, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:51] {2620} INFO - at 1.3s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:51] {2458} INFO - iteration 39, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:51] {2620} INFO - at 1.3s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:51] {2458} INFO - iteration 40, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:51] {2620} INFO - at 1.4s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:51] {2458} INFO - iteration 41, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:51] {2620} INFO - at 1.4s,\testimator extra_tree's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:51] {2458} INFO - iteration 42, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:51] {2620} INFO - at 1.4s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:51] {2458} INFO - iteration 43, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:51] {2620} INFO - at 1.5s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:51] {2458} INFO - iteration 44, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:51] {2620} INFO - at 1.5s,\testimator extra_tree's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:51] {2458} INFO - iteration 45, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:51] {2620} INFO - at 1.6s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:51] {2458} INFO - iteration 46, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:51] {2620} INFO - at 1.6s,\testimator extra_tree's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:51] {2458} INFO - iteration 47, current learner rf\n",
- "[flaml.automl: 02-28 21:54:52] {2620} INFO - at 1.7s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:52] {2458} INFO - iteration 48, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:52] {2620} INFO - at 1.7s,\testimator extra_tree's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:52] {2458} INFO - iteration 49, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:52] {2620} INFO - at 1.7s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:52] {2458} INFO - iteration 50, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:52] {2620} INFO - at 1.8s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:52] {2458} INFO - iteration 51, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:52] {2620} INFO - at 1.8s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:52] {2458} INFO - iteration 52, current learner rf\n",
- "[flaml.automl: 02-28 21:54:52] {2620} INFO - at 1.9s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:52] {2458} INFO - iteration 53, current learner rf\n",
- "[flaml.automl: 02-28 21:54:52] {2620} INFO - at 1.9s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:52] {2458} INFO - iteration 54, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:52] {2620} INFO - at 1.9s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:52] {2458} INFO - iteration 55, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:52] {2620} INFO - at 2.0s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:52] {2458} INFO - iteration 56, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:52] {2620} INFO - at 2.0s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:52] {2458} INFO - iteration 57, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:52] {2620} INFO - at 2.0s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:52] {2458} INFO - iteration 58, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:52] {2620} INFO - at 2.0s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:52] {2458} INFO - iteration 59, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:52] {2620} INFO - at 2.1s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:52] {2458} INFO - iteration 60, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:52] {2620} INFO - at 2.1s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:52] {2458} INFO - iteration 61, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:52] {2620} INFO - at 2.1s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:52] {2458} INFO - iteration 62, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:52] {2620} INFO - at 2.1s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:52] {2458} INFO - iteration 63, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:52] {2620} INFO - at 2.2s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:52] {2458} INFO - iteration 64, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:52] {2620} INFO - at 2.2s,\testimator extra_tree's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:52] {2458} INFO - iteration 65, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:52] {2620} INFO - at 2.2s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:52] {2458} INFO - iteration 66, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:52] {2620} INFO - at 2.2s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:52] {2458} INFO - iteration 67, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:52] {2620} INFO - at 2.3s,\testimator extra_tree's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:52] {2458} INFO - iteration 68, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:52] {2620} INFO - at 2.3s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:52] {2458} INFO - iteration 69, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:52] {2620} INFO - at 2.3s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:52] {2458} INFO - iteration 70, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:52] {2620} INFO - at 2.3s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:52] {2458} INFO - iteration 71, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:52] {2620} INFO - at 2.4s,\testimator extra_tree's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:52] {2458} INFO - iteration 72, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:52] {2620} INFO - at 2.4s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:52] {2458} INFO - iteration 73, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:52] {2620} INFO - at 2.4s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:52] {2458} INFO - iteration 74, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:52] {2620} INFO - at 2.5s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:52] {2458} INFO - iteration 75, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:52] {2620} INFO - at 2.5s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:52] {2458} INFO - iteration 76, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:52] {2620} INFO - at 2.5s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:52] {2458} INFO - iteration 77, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:52] {2620} INFO - at 2.5s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:52] {2458} INFO - iteration 78, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:52] {2620} INFO - at 2.6s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:52] {2458} INFO - iteration 79, current learner rf\n",
- "[flaml.automl: 02-28 21:54:52] {2620} INFO - at 2.6s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:53] {2458} INFO - iteration 80, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:53] {2620} INFO - at 2.7s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:53] {2458} INFO - iteration 81, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:53] {2620} INFO - at 2.7s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:53] {2458} INFO - iteration 82, current learner rf\n",
- "[flaml.automl: 02-28 21:54:53] {2620} INFO - at 2.7s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:53] {2458} INFO - iteration 83, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:53] {2620} INFO - at 2.8s,\testimator extra_tree's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:53] {2458} INFO - iteration 84, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:53] {2620} INFO - at 2.8s,\testimator extra_tree's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:53] {2458} INFO - iteration 85, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:53] {2620} INFO - at 2.9s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:53] {2458} INFO - iteration 86, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:53] {2620} INFO - at 2.9s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:53] {2458} INFO - iteration 87, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:53] {2620} INFO - at 3.0s,\testimator extra_tree's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:53] {2458} INFO - iteration 88, current learner rf\n",
- "[flaml.automl: 02-28 21:54:53] {2620} INFO - at 3.0s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:53] {2458} INFO - iteration 89, current learner rf\n",
- "[flaml.automl: 02-28 21:54:53] {2620} INFO - at 3.1s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:53] {2458} INFO - iteration 90, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:53] {2620} INFO - at 3.1s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:53] {2458} INFO - iteration 91, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:53] {2620} INFO - at 3.1s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:53] {2458} INFO - iteration 92, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:53] {2620} INFO - at 3.1s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:53] {2458} INFO - iteration 93, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:53] {2620} INFO - at 3.2s,\testimator extra_tree's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:53] {2458} INFO - iteration 94, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:53] {2620} INFO - at 3.2s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:53] {2458} INFO - iteration 95, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:53] {2620} INFO - at 3.2s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:53] {2458} INFO - iteration 96, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:53] {2620} INFO - at 3.3s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:53] {2458} INFO - iteration 97, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:53] {2620} INFO - at 3.3s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:53] {2458} INFO - iteration 98, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:53] {2620} INFO - at 3.3s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:53] {2458} INFO - iteration 99, current learner rf\n",
- "[flaml.automl: 02-28 21:54:53] {2620} INFO - at 3.4s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:53] {2458} INFO - iteration 100, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:53] {2620} INFO - at 3.4s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:53] {2458} INFO - iteration 101, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:53] {2620} INFO - at 3.4s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:53] {2458} INFO - iteration 102, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:53] {2620} INFO - at 3.4s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:53] {2458} INFO - iteration 103, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:53] {2620} INFO - at 3.5s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:53] {2458} INFO - iteration 104, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:53] {2620} INFO - at 3.5s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:53] {2458} INFO - iteration 105, current learner rf\n",
- "[flaml.automl: 02-28 21:54:53] {2620} INFO - at 3.5s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:53] {2458} INFO - iteration 106, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:53] {2620} INFO - at 3.5s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:53] {2458} INFO - iteration 107, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:53] {2620} INFO - at 3.6s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:53] {2458} INFO - iteration 108, current learner rf\n",
- "[flaml.automl: 02-28 21:54:53] {2620} INFO - at 3.6s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:53] {2458} INFO - iteration 109, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:54] {2620} INFO - at 3.6s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:54] {2458} INFO - iteration 110, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:54] {2620} INFO - at 3.7s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:54] {2458} INFO - iteration 111, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:54] {2620} INFO - at 3.7s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:54] {2458} INFO - iteration 112, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:54] {2620} INFO - at 3.7s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:54] {2458} INFO - iteration 113, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:54] {2620} INFO - at 3.7s,\testimator extra_tree's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:54] {2458} INFO - iteration 114, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:54] {2620} INFO - at 3.8s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:54] {2458} INFO - iteration 115, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:54] {2620} INFO - at 3.8s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:54] {2458} INFO - iteration 116, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:54] {2620} INFO - at 3.8s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:54] {2458} INFO - iteration 117, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:54] {2620} INFO - at 3.8s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:54] {2458} INFO - iteration 118, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:54] {2620} INFO - at 3.9s,\testimator extra_tree's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:54] {2458} INFO - iteration 119, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:54] {2620} INFO - at 3.9s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:54] {2458} INFO - iteration 120, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:54] {2620} INFO - at 3.9s,\testimator extra_tree's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:54] {2458} INFO - iteration 121, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:54] {2620} INFO - at 4.0s,\testimator extra_tree's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:54] {2458} INFO - iteration 122, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:54] {2620} INFO - at 4.0s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:54] {2458} INFO - iteration 123, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:54] {2620} INFO - at 4.1s,\testimator extra_tree's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:54] {2458} INFO - iteration 124, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:54] {2620} INFO - at 4.1s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:54] {2458} INFO - iteration 125, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:54] {2620} INFO - at 4.1s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:54] {2458} INFO - iteration 126, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:54] {2620} INFO - at 4.1s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:54] {2458} INFO - iteration 127, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:54] {2620} INFO - at 4.2s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:54] {2458} INFO - iteration 128, current learner rf\n",
- "[flaml.automl: 02-28 21:54:54] {2620} INFO - at 4.2s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:54] {2458} INFO - iteration 129, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:54] {2620} INFO - at 4.2s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:54] {2458} INFO - iteration 130, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:54] {2620} INFO - at 4.3s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:54] {2458} INFO - iteration 131, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:54] {2620} INFO - at 4.3s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:54] {2458} INFO - iteration 132, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:54] {2620} INFO - at 4.3s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:54] {2458} INFO - iteration 133, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:54] {2620} INFO - at 4.3s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:54] {2458} INFO - iteration 134, current learner rf\n",
- "[flaml.automl: 02-28 21:54:54] {2620} INFO - at 4.4s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:54] {2458} INFO - iteration 135, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:54] {2620} INFO - at 4.4s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:54] {2458} INFO - iteration 136, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:54] {2620} INFO - at 4.4s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:54] {2458} INFO - iteration 137, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:54] {2620} INFO - at 4.4s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:54] {2458} INFO - iteration 138, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:54] {2620} INFO - at 4.5s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:54] {2458} INFO - iteration 139, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:54] {2620} INFO - at 4.5s,\testimator extra_tree's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:54] {2458} INFO - iteration 140, current learner rf\n",
- "[flaml.automl: 02-28 21:54:54] {2620} INFO - at 4.5s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:54] {2458} INFO - iteration 141, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:55] {2620} INFO - at 4.7s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:55] {2458} INFO - iteration 142, current learner rf\n",
- "[flaml.automl: 02-28 21:54:55] {2620} INFO - at 4.8s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:55] {2458} INFO - iteration 143, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:55] {2620} INFO - at 4.8s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:55] {2458} INFO - iteration 144, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:55] {2620} INFO - at 4.9s,\testimator extra_tree's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:55] {2458} INFO - iteration 145, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:55] {2620} INFO - at 4.9s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:55] {2458} INFO - iteration 146, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:55] {2620} INFO - at 4.9s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:55] {2458} INFO - iteration 147, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:55] {2620} INFO - at 4.9s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:55] {2458} INFO - iteration 148, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:55] {2620} INFO - at 4.9s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:55] {2458} INFO - iteration 149, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:55] {2620} INFO - at 5.0s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:55] {2458} INFO - iteration 150, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:55] {2620} INFO - at 5.0s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:55] {2458} INFO - iteration 151, current learner rf\n",
- "[flaml.automl: 02-28 21:54:55] {2620} INFO - at 5.0s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:55] {2458} INFO - iteration 152, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:55] {2620} INFO - at 5.1s,\testimator extra_tree's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:55] {2458} INFO - iteration 153, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:55] {2620} INFO - at 5.1s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:55] {2458} INFO - iteration 154, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:55] {2620} INFO - at 5.1s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:55] {2458} INFO - iteration 155, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:55] {2620} INFO - at 5.2s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:55] {2458} INFO - iteration 156, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:55] {2620} INFO - at 5.2s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:55] {2458} INFO - iteration 157, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:55] {2620} INFO - at 5.2s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:55] {2458} INFO - iteration 158, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:55] {2620} INFO - at 5.2s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:55] {2458} INFO - iteration 159, current learner rf\n",
- "[flaml.automl: 02-28 21:54:55] {2620} INFO - at 5.3s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:55] {2458} INFO - iteration 160, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:55] {2620} INFO - at 5.3s,\testimator extra_tree's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:55] {2458} INFO - iteration 161, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:55] {2620} INFO - at 5.4s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:55] {2458} INFO - iteration 162, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:55] {2620} INFO - at 5.4s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:55] {2458} INFO - iteration 163, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:55] {2620} INFO - at 5.4s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:55] {2458} INFO - iteration 164, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:55] {2620} INFO - at 5.4s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:55] {2458} INFO - iteration 165, current learner rf\n",
- "[flaml.automl: 02-28 21:54:55] {2620} INFO - at 5.5s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:55] {2458} INFO - iteration 166, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:55] {2620} INFO - at 5.5s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:55] {2458} INFO - iteration 167, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:55] {2620} INFO - at 5.5s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:55] {2458} INFO - iteration 168, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:55] {2620} INFO - at 5.6s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:55] {2458} INFO - iteration 169, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:55] {2620} INFO - at 5.6s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:55] {2458} INFO - iteration 170, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:55] {2620} INFO - at 5.6s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:55] {2458} INFO - iteration 171, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:56] {2620} INFO - at 5.7s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:56] {2458} INFO - iteration 172, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:56] {2620} INFO - at 5.7s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:56] {2458} INFO - iteration 173, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:56] {2620} INFO - at 5.7s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:56] {2458} INFO - iteration 174, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:56] {2620} INFO - at 5.7s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:56] {2458} INFO - iteration 175, current learner rf\n",
- "[flaml.automl: 02-28 21:54:56] {2620} INFO - at 5.8s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:56] {2458} INFO - iteration 176, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:56] {2620} INFO - at 5.8s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:56] {2458} INFO - iteration 177, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:56] {2620} INFO - at 5.8s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:56] {2458} INFO - iteration 178, current learner rf\n",
- "[flaml.automl: 02-28 21:54:56] {2620} INFO - at 5.9s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:56] {2458} INFO - iteration 179, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:56] {2620} INFO - at 6.2s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:56] {2458} INFO - iteration 180, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:56] {2620} INFO - at 6.3s,\testimator extra_tree's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:56] {2458} INFO - iteration 181, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:56] {2620} INFO - at 6.3s,\testimator extra_tree's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:56] {2458} INFO - iteration 182, current learner rf\n",
- "[flaml.automl: 02-28 21:54:56] {2620} INFO - at 6.4s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:56] {2458} INFO - iteration 183, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:56] {2620} INFO - at 6.4s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:56] {2458} INFO - iteration 184, current learner rf\n",
- "[flaml.automl: 02-28 21:54:56] {2620} INFO - at 6.4s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:56] {2458} INFO - iteration 185, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:56] {2620} INFO - at 6.5s,\testimator extra_tree's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:56] {2458} INFO - iteration 186, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:56] {2620} INFO - at 6.5s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:56] {2458} INFO - iteration 187, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:56] {2620} INFO - at 6.5s,\testimator extra_tree's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:56] {2458} INFO - iteration 188, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:56] {2620} INFO - at 6.6s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:56] {2458} INFO - iteration 189, current learner rf\n",
- "[flaml.automl: 02-28 21:54:56] {2620} INFO - at 6.6s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:56] {2458} INFO - iteration 190, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:57] {2620} INFO - at 6.6s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:57] {2458} INFO - iteration 191, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:57] {2620} INFO - at 6.7s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:57] {2458} INFO - iteration 192, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:57] {2620} INFO - at 6.7s,\testimator extra_tree's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:57] {2458} INFO - iteration 193, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:57] {2620} INFO - at 6.7s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:57] {2458} INFO - iteration 194, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:57] {2620} INFO - at 6.8s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:57] {2458} INFO - iteration 195, current learner rf\n",
- "[flaml.automl: 02-28 21:54:57] {2620} INFO - at 6.8s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:57] {2458} INFO - iteration 196, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:57] {2620} INFO - at 6.8s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:57] {2458} INFO - iteration 197, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:57] {2620} INFO - at 6.9s,\testimator extra_tree's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:57] {2458} INFO - iteration 198, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:57] {2620} INFO - at 6.9s,\testimator extra_tree's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:57] {2458} INFO - iteration 199, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:57] {2620} INFO - at 6.9s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:57] {2458} INFO - iteration 200, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:57] {2620} INFO - at 6.9s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:57] {2458} INFO - iteration 201, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:57] {2620} INFO - at 7.0s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:57] {2458} INFO - iteration 202, current learner rf\n",
- "[flaml.automl: 02-28 21:54:57] {2620} INFO - at 7.0s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:57] {2458} INFO - iteration 203, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:57] {2620} INFO - at 7.1s,\testimator extra_tree's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:57] {2458} INFO - iteration 204, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:57] {2620} INFO - at 7.1s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:57] {2458} INFO - iteration 205, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:57] {2620} INFO - at 7.5s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:57] {2458} INFO - iteration 206, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:57] {2620} INFO - at 7.5s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:57] {2458} INFO - iteration 207, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:57] {2620} INFO - at 7.5s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:57] {2458} INFO - iteration 208, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:57] {2620} INFO - at 7.6s,\testimator extra_tree's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:57] {2458} INFO - iteration 209, current learner rf\n",
- "[flaml.automl: 02-28 21:54:57] {2620} INFO - at 7.6s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:57] {2458} INFO - iteration 210, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:58] {2620} INFO - at 7.6s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:58] {2458} INFO - iteration 211, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:58] {2620} INFO - at 7.7s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:58] {2458} INFO - iteration 212, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:58] {2620} INFO - at 7.7s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:58] {2458} INFO - iteration 213, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:58] {2620} INFO - at 7.7s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:58] {2458} INFO - iteration 214, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:58] {2620} INFO - at 7.7s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:58] {2458} INFO - iteration 215, current learner rf\n",
- "[flaml.automl: 02-28 21:54:58] {2620} INFO - at 7.8s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:58] {2458} INFO - iteration 216, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:58] {2620} INFO - at 7.8s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:58] {2458} INFO - iteration 217, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:58] {2620} INFO - at 7.8s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:58] {2458} INFO - iteration 218, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:58] {2620} INFO - at 7.8s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:58] {2458} INFO - iteration 219, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:58] {2620} INFO - at 7.9s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:58] {2458} INFO - iteration 220, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:58] {2620} INFO - at 7.9s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:58] {2458} INFO - iteration 221, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:58] {2620} INFO - at 7.9s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:58] {2458} INFO - iteration 222, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:58] {2620} INFO - at 8.0s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:58] {2458} INFO - iteration 223, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:58] {2620} INFO - at 8.0s,\testimator extra_tree's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:58] {2458} INFO - iteration 224, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:58] {2620} INFO - at 8.0s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:58] {2458} INFO - iteration 225, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:58] {2620} INFO - at 8.0s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:58] {2458} INFO - iteration 226, current learner rf\n",
- "[flaml.automl: 02-28 21:54:58] {2620} INFO - at 8.1s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:58] {2458} INFO - iteration 227, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:58] {2620} INFO - at 8.1s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:58] {2458} INFO - iteration 228, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:58] {2620} INFO - at 8.1s,\testimator extra_tree's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:58] {2458} INFO - iteration 229, current learner rf\n",
- "[flaml.automl: 02-28 21:54:58] {2620} INFO - at 8.2s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:58] {2458} INFO - iteration 230, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:58] {2620} INFO - at 8.2s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:58] {2458} INFO - iteration 231, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:58] {2620} INFO - at 8.2s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:58] {2458} INFO - iteration 232, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:58] {2620} INFO - at 8.3s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:58] {2458} INFO - iteration 233, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:58] {2620} INFO - at 8.3s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:58] {2458} INFO - iteration 234, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:58] {2620} INFO - at 8.3s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:58] {2458} INFO - iteration 235, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:58] {2620} INFO - at 8.3s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:58] {2458} INFO - iteration 236, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:58] {2620} INFO - at 8.4s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:58] {2458} INFO - iteration 237, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:58] {2620} INFO - at 8.4s,\testimator extra_tree's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:58] {2458} INFO - iteration 238, current learner rf\n",
- "[flaml.automl: 02-28 21:54:58] {2620} INFO - at 8.4s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:58] {2458} INFO - iteration 239, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:58] {2620} INFO - at 8.5s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:58] {2458} INFO - iteration 240, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:58] {2620} INFO - at 8.5s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:58] {2458} INFO - iteration 241, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:58] {2620} INFO - at 8.5s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:58] {2458} INFO - iteration 242, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:58] {2620} INFO - at 8.5s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:58] {2458} INFO - iteration 243, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:58] {2620} INFO - at 8.6s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:58] {2458} INFO - iteration 244, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:58] {2620} INFO - at 8.6s,\testimator extra_tree's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:58] {2458} INFO - iteration 245, current learner rf\n",
- "[flaml.automl: 02-28 21:54:59] {2620} INFO - at 8.6s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:59] {2458} INFO - iteration 246, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:59] {2620} INFO - at 8.7s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:59] {2458} INFO - iteration 247, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:59] {2620} INFO - at 8.7s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:59] {2458} INFO - iteration 248, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:59] {2620} INFO - at 8.7s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:59] {2458} INFO - iteration 249, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:59] {2620} INFO - at 8.8s,\testimator extra_tree's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:59] {2458} INFO - iteration 250, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:59] {2620} INFO - at 8.8s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:59] {2458} INFO - iteration 251, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:59] {2620} INFO - at 8.9s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:59] {2458} INFO - iteration 252, current learner rf\n",
- "[flaml.automl: 02-28 21:54:59] {2620} INFO - at 8.9s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:59] {2458} INFO - iteration 253, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:59] {2620} INFO - at 8.9s,\testimator extra_tree's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:59] {2458} INFO - iteration 254, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:59] {2620} INFO - at 9.0s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:59] {2458} INFO - iteration 255, current learner lgbm\n",
- "[flaml.automl: 02-28 21:54:59] {2620} INFO - at 9.0s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:59] {2458} INFO - iteration 256, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:59] {2620} INFO - at 9.0s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:59] {2458} INFO - iteration 257, current learner rf\n",
- "[flaml.automl: 02-28 21:54:59] {2620} INFO - at 9.1s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:59] {2458} INFO - iteration 258, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:59] {2620} INFO - at 9.1s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:59] {2458} INFO - iteration 259, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:59] {2620} INFO - at 9.1s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:59] {2458} INFO - iteration 260, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:59] {2620} INFO - at 9.2s,\testimator extra_tree's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:59] {2458} INFO - iteration 261, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:59] {2620} INFO - at 9.2s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:59] {2458} INFO - iteration 262, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:59] {2620} INFO - at 9.2s,\testimator extra_tree's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:59] {2458} INFO - iteration 263, current learner rf\n",
- "[flaml.automl: 02-28 21:54:59] {2620} INFO - at 9.3s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:59] {2458} INFO - iteration 264, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:59] {2620} INFO - at 9.3s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:59] {2458} INFO - iteration 265, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:59] {2620} INFO - at 9.3s,\testimator extra_tree's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:59] {2458} INFO - iteration 266, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:59] {2620} INFO - at 9.3s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:59] {2458} INFO - iteration 267, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:59] {2620} INFO - at 9.4s,\testimator extra_tree's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:59] {2458} INFO - iteration 268, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:59] {2620} INFO - at 9.4s,\testimator extra_tree's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:59] {2458} INFO - iteration 269, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:54:59] {2620} INFO - at 9.4s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:59] {2458} INFO - iteration 270, current learner rf\n",
- "[flaml.automl: 02-28 21:54:59] {2620} INFO - at 9.5s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:59] {2458} INFO - iteration 271, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:54:59] {2620} INFO - at 9.5s,\testimator extra_tree's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:59] {2458} INFO - iteration 272, current learner xgboost\n",
- "[flaml.automl: 02-28 21:54:59] {2620} INFO - at 9.6s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:59] {2458} INFO - iteration 273, current learner rf\n",
- "[flaml.automl: 02-28 21:54:59] {2620} INFO - at 9.6s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:54:59] {2458} INFO - iteration 274, current learner rf\n",
- "[flaml.automl: 02-28 21:55:00] {2620} INFO - at 9.6s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:00] {2458} INFO - iteration 275, current learner rf\n",
- "[flaml.automl: 02-28 21:55:00] {2620} INFO - at 9.7s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:00] {2458} INFO - iteration 276, current learner rf\n",
- "[flaml.automl: 02-28 21:55:00] {2620} INFO - at 9.7s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:00] {2458} INFO - iteration 277, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:00] {2620} INFO - at 9.8s,\testimator extra_tree's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:00] {2458} INFO - iteration 278, current learner xgboost\n",
- "[flaml.automl: 02-28 21:55:00] {2620} INFO - at 9.8s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:00] {2458} INFO - iteration 279, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:55:00] {2620} INFO - at 9.8s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:00] {2458} INFO - iteration 280, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:00] {2620} INFO - at 9.9s,\testimator extra_tree's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:00] {2458} INFO - iteration 281, current learner lgbm\n",
- "[flaml.automl: 02-28 21:55:00] {2620} INFO - at 9.9s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:00] {2458} INFO - iteration 282, current learner xgboost\n",
- "[flaml.automl: 02-28 21:55:00] {2620} INFO - at 9.9s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:00] {2458} INFO - iteration 283, current learner rf\n",
- "[flaml.automl: 02-28 21:55:00] {2620} INFO - at 10.0s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:00] {2458} INFO - iteration 284, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:00] {2620} INFO - at 10.0s,\testimator extra_tree's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:00] {2458} INFO - iteration 285, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:55:00] {2620} INFO - at 10.0s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:00] {2458} INFO - iteration 286, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:55:00] {2620} INFO - at 10.1s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:00] {2458} INFO - iteration 287, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:00] {2620} INFO - at 10.1s,\testimator extra_tree's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:00] {2458} INFO - iteration 288, current learner rf\n",
- "[flaml.automl: 02-28 21:55:00] {2620} INFO - at 10.1s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:00] {2458} INFO - iteration 289, current learner rf\n",
- "[flaml.automl: 02-28 21:55:00] {2620} INFO - at 10.2s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:00] {2458} INFO - iteration 290, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:55:00] {2620} INFO - at 10.2s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:00] {2458} INFO - iteration 291, current learner xgboost\n",
- "[flaml.automl: 02-28 21:55:00] {2620} INFO - at 10.2s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:00] {2458} INFO - iteration 292, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:00] {2620} INFO - at 10.3s,\testimator extra_tree's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:00] {2458} INFO - iteration 293, current learner xgboost\n",
- "[flaml.automl: 02-28 21:55:00] {2620} INFO - at 10.3s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:00] {2458} INFO - iteration 294, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:00] {2620} INFO - at 10.3s,\testimator extra_tree's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:00] {2458} INFO - iteration 295, current learner xgboost\n",
- "[flaml.automl: 02-28 21:55:00] {2620} INFO - at 10.4s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:00] {2458} INFO - iteration 296, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:00] {2620} INFO - at 10.4s,\testimator extra_tree's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:00] {2458} INFO - iteration 297, current learner xgboost\n",
- "[flaml.automl: 02-28 21:55:00] {2620} INFO - at 10.4s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:00] {2458} INFO - iteration 298, current learner xgboost\n",
- "[flaml.automl: 02-28 21:55:00] {2620} INFO - at 10.5s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:00] {2458} INFO - iteration 299, current learner rf\n",
- "[flaml.automl: 02-28 21:55:00] {2620} INFO - at 10.5s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:00] {2458} INFO - iteration 300, current learner lgbm\n",
- "[flaml.automl: 02-28 21:55:00] {2620} INFO - at 10.6s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:00] {2458} INFO - iteration 301, current learner rf\n",
- "[flaml.automl: 02-28 21:55:00] {2620} INFO - at 10.6s,\testimator rf's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:00] {2458} INFO - iteration 302, current learner lgbm\n",
- "[flaml.automl: 02-28 21:55:01] {2620} INFO - at 10.6s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:01] {2458} INFO - iteration 303, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:55:01] {2620} INFO - at 10.7s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:01] {2458} INFO - iteration 304, current learner lgbm\n",
- "[flaml.automl: 02-28 21:55:01] {2620} INFO - at 10.7s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:01] {2458} INFO - iteration 305, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:01] {2620} INFO - at 10.7s,\testimator extra_tree's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:01] {2458} INFO - iteration 306, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:55:01] {2620} INFO - at 10.8s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:01] {2458} INFO - iteration 307, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:01] {2620} INFO - at 10.8s,\testimator extra_tree's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:01] {2458} INFO - iteration 308, current learner xgboost\n",
- "[flaml.automl: 02-28 21:55:01] {2620} INFO - at 10.8s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:01] {2458} INFO - iteration 309, current learner lgbm\n",
- "[flaml.automl: 02-28 21:55:01] {2620} INFO - at 10.8s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:01] {2458} INFO - iteration 310, current learner xgboost\n",
- "[flaml.automl: 02-28 21:55:01] {2620} INFO - at 10.9s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:01] {2458} INFO - iteration 311, current learner rf\n",
- "[flaml.automl: 02-28 21:55:01] {2620} INFO - at 10.9s,\testimator rf's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:01] {2458} INFO - iteration 312, current learner lgbm\n",
- "[flaml.automl: 02-28 21:55:01] {2620} INFO - at 10.9s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:01] {2458} INFO - iteration 313, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:01] {2620} INFO - at 11.0s,\testimator extra_tree's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:01] {2458} INFO - iteration 314, current learner rf\n",
- "[flaml.automl: 02-28 21:55:01] {2620} INFO - at 11.0s,\testimator rf's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:01] {2458} INFO - iteration 315, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:55:01] {2620} INFO - at 11.0s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:01] {2458} INFO - iteration 316, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:55:01] {2620} INFO - at 11.1s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:01] {2458} INFO - iteration 317, current learner rf\n",
- "[flaml.automl: 02-28 21:55:01] {2620} INFO - at 11.1s,\testimator rf's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:01] {2458} INFO - iteration 318, current learner lgbm\n",
- "[flaml.automl: 02-28 21:55:01] {2620} INFO - at 11.1s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:01] {2458} INFO - iteration 319, current learner lgbm\n",
- "[flaml.automl: 02-28 21:55:01] {2620} INFO - at 11.2s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:01] {2458} INFO - iteration 320, current learner xgboost\n",
- "[flaml.automl: 02-28 21:55:01] {2620} INFO - at 11.2s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:01] {2458} INFO - iteration 321, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:01] {2620} INFO - at 11.2s,\testimator extra_tree's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:01] {2458} INFO - iteration 322, current learner lgbm\n",
- "[flaml.automl: 02-28 21:55:01] {2620} INFO - at 11.3s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:01] {2458} INFO - iteration 323, current learner rf\n",
- "[flaml.automl: 02-28 21:55:01] {2620} INFO - at 11.3s,\testimator rf's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:01] {2458} INFO - iteration 324, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:55:01] {2620} INFO - at 11.3s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:01] {2458} INFO - iteration 325, current learner xgboost\n",
- "[flaml.automl: 02-28 21:55:01] {2620} INFO - at 11.4s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:01] {2458} INFO - iteration 326, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:01] {2620} INFO - at 11.4s,\testimator extra_tree's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:01] {2458} INFO - iteration 327, current learner rf\n",
- "[flaml.automl: 02-28 21:55:01] {2620} INFO - at 11.5s,\testimator rf's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:01] {2458} INFO - iteration 328, current learner lgbm\n",
- "[flaml.automl: 02-28 21:55:01] {2620} INFO - at 11.5s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:01] {2458} INFO - iteration 329, current learner xgboost\n",
- "[flaml.automl: 02-28 21:55:01] {2620} INFO - at 11.5s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:01] {2458} INFO - iteration 330, current learner xgboost\n",
- "[flaml.automl: 02-28 21:55:01] {2620} INFO - at 11.5s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:01] {2458} INFO - iteration 331, current learner lgbm\n",
- "[flaml.automl: 02-28 21:55:01] {2620} INFO - at 11.5s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:01] {2458} INFO - iteration 332, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:01] {2620} INFO - at 11.6s,\testimator extra_tree's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:01] {2458} INFO - iteration 333, current learner xgboost\n",
- "[flaml.automl: 02-28 21:55:01] {2620} INFO - at 11.6s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:01] {2458} INFO - iteration 334, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:02] {2620} INFO - at 11.6s,\testimator extra_tree's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:02] {2458} INFO - iteration 335, current learner lgbm\n",
- "[flaml.automl: 02-28 21:55:02] {2620} INFO - at 11.7s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:02] {2458} INFO - iteration 336, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:02] {2620} INFO - at 11.7s,\testimator extra_tree's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:02] {2458} INFO - iteration 337, current learner rf\n",
- "[flaml.automl: 02-28 21:55:02] {2620} INFO - at 11.8s,\testimator rf's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:02] {2458} INFO - iteration 338, current learner xgboost\n",
- "[flaml.automl: 02-28 21:55:02] {2620} INFO - at 11.8s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:02] {2458} INFO - iteration 339, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:02] {2620} INFO - at 11.8s,\testimator extra_tree's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:02] {2458} INFO - iteration 340, current learner lgbm\n",
- "[flaml.automl: 02-28 21:55:02] {2620} INFO - at 11.9s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:02] {2458} INFO - iteration 341, current learner rf\n",
- "[flaml.automl: 02-28 21:55:02] {2620} INFO - at 11.9s,\testimator rf's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:02] {2458} INFO - iteration 342, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:55:02] {2620} INFO - at 11.9s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:02] {2458} INFO - iteration 343, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:55:02] {2620} INFO - at 12.0s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:02] {2458} INFO - iteration 344, current learner xgboost\n",
- "[flaml.automl: 02-28 21:55:02] {2620} INFO - at 12.0s,\testimator xgboost's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:02] {2458} INFO - iteration 345, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:55:02] {2620} INFO - at 12.0s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:02] {2458} INFO - iteration 346, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:55:02] {2620} INFO - at 12.0s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:02] {2458} INFO - iteration 347, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:02] {2620} INFO - at 12.1s,\testimator extra_tree's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:02] {2458} INFO - iteration 348, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:55:02] {2620} INFO - at 12.1s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:02] {2458} INFO - iteration 349, current learner lgbm\n",
- "[flaml.automl: 02-28 21:55:02] {2620} INFO - at 12.1s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:02] {2458} INFO - iteration 350, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:02] {2620} INFO - at 12.2s,\testimator extra_tree's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:02] {2458} INFO - iteration 351, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:02] {2620} INFO - at 12.2s,\testimator extra_tree's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:02] {2458} INFO - iteration 352, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:02] {2620} INFO - at 12.2s,\testimator extra_tree's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:02] {2458} INFO - iteration 353, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:02] {2620} INFO - at 12.3s,\testimator extra_tree's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:02] {2458} INFO - iteration 354, current learner lgbm\n",
- "[flaml.automl: 02-28 21:55:02] {2620} INFO - at 12.3s,\testimator lgbm's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:02] {2458} INFO - iteration 355, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:02] {2620} INFO - at 12.3s,\testimator extra_tree's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:02] {2458} INFO - iteration 356, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:02] {2620} INFO - at 12.4s,\testimator extra_tree's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:02] {2458} INFO - iteration 357, current learner rf\n",
- "[flaml.automl: 02-28 21:55:02] {2620} INFO - at 12.4s,\testimator rf's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:02] {2458} INFO - iteration 358, current learner rf\n",
- "[flaml.automl: 02-28 21:55:02] {2620} INFO - at 12.5s,\testimator rf's best error=0.0333,\tbest estimator xgboost's best error=0.0333\n",
- "[flaml.automl: 02-28 21:55:02] {2458} INFO - iteration 359, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:02] {2620} INFO - at 12.5s,\testimator extra_tree's best error=0.0000,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:02] {2458} INFO - iteration 360, current learner xgboost\n",
- "[flaml.automl: 02-28 21:55:02] {2620} INFO - at 12.6s,\testimator xgboost's best error=0.0333,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:02] {2458} INFO - iteration 361, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:02] {2620} INFO - at 12.6s,\testimator extra_tree's best error=0.0000,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:02] {2458} INFO - iteration 362, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:03] {2620} INFO - at 12.6s,\testimator extra_tree's best error=0.0000,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:03] {2458} INFO - iteration 363, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:03] {2620} INFO - at 12.7s,\testimator extra_tree's best error=0.0000,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:03] {2458} INFO - iteration 364, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:55:03] {2620} INFO - at 12.7s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:03] {2458} INFO - iteration 365, current learner xgboost\n",
- "[flaml.automl: 02-28 21:55:03] {2620} INFO - at 12.7s,\testimator xgboost's best error=0.0333,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:03] {2458} INFO - iteration 366, current learner lgbm\n",
- "[flaml.automl: 02-28 21:55:03] {2620} INFO - at 12.7s,\testimator lgbm's best error=0.0333,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:03] {2458} INFO - iteration 367, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:55:03] {2620} INFO - at 12.8s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:03] {2458} INFO - iteration 368, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:03] {2620} INFO - at 12.8s,\testimator extra_tree's best error=0.0000,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:03] {2458} INFO - iteration 369, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:55:03] {2620} INFO - at 12.8s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:03] {2458} INFO - iteration 370, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:03] {2620} INFO - at 12.9s,\testimator extra_tree's best error=0.0000,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:03] {2458} INFO - iteration 371, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:55:03] {2620} INFO - at 12.9s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:03] {2458} INFO - iteration 372, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:03] {2620} INFO - at 12.9s,\testimator extra_tree's best error=0.0000,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:03] {2458} INFO - iteration 373, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:03] {2620} INFO - at 13.0s,\testimator extra_tree's best error=0.0000,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:03] {2458} INFO - iteration 374, current learner lgbm\n",
- "[flaml.automl: 02-28 21:55:03] {2620} INFO - at 13.0s,\testimator lgbm's best error=0.0333,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:03] {2458} INFO - iteration 375, current learner xgboost\n",
- "[flaml.automl: 02-28 21:55:03] {2620} INFO - at 13.0s,\testimator xgboost's best error=0.0333,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:03] {2458} INFO - iteration 376, current learner lgbm\n",
- "[flaml.automl: 02-28 21:55:03] {2620} INFO - at 13.0s,\testimator lgbm's best error=0.0333,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:03] {2458} INFO - iteration 377, current learner lgbm\n",
- "[flaml.automl: 02-28 21:55:03] {2620} INFO - at 13.1s,\testimator lgbm's best error=0.0333,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:03] {2458} INFO - iteration 378, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:03] {2620} INFO - at 13.1s,\testimator extra_tree's best error=0.0000,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:03] {2458} INFO - iteration 379, current learner lgbm\n",
- "[flaml.automl: 02-28 21:55:03] {2620} INFO - at 13.1s,\testimator lgbm's best error=0.0333,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:03] {2458} INFO - iteration 380, current learner xgboost\n",
- "[flaml.automl: 02-28 21:55:03] {2620} INFO - at 13.1s,\testimator xgboost's best error=0.0333,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:03] {2458} INFO - iteration 381, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:03] {2620} INFO - at 13.2s,\testimator extra_tree's best error=0.0000,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:03] {2458} INFO - iteration 382, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:03] {2620} INFO - at 13.2s,\testimator extra_tree's best error=0.0000,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:03] {2458} INFO - iteration 383, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:03] {2620} INFO - at 13.3s,\testimator extra_tree's best error=0.0000,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:03] {2458} INFO - iteration 384, current learner rf\n",
- "[flaml.automl: 02-28 21:55:03] {2620} INFO - at 13.3s,\testimator rf's best error=0.0333,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:03] {2458} INFO - iteration 385, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:03] {2620} INFO - at 13.4s,\testimator extra_tree's best error=0.0000,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:03] {2458} INFO - iteration 386, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:03] {2620} INFO - at 13.4s,\testimator extra_tree's best error=0.0000,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:03] {2458} INFO - iteration 387, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:55:03] {2620} INFO - at 13.4s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:03] {2458} INFO - iteration 388, current learner rf\n",
- "[flaml.automl: 02-28 21:55:03] {2620} INFO - at 13.5s,\testimator rf's best error=0.0333,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:03] {2458} INFO - iteration 389, current learner rf\n",
- "[flaml.automl: 02-28 21:55:03] {2620} INFO - at 13.5s,\testimator rf's best error=0.0333,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:03] {2458} INFO - iteration 390, current learner rf\n",
- "[flaml.automl: 02-28 21:55:03] {2620} INFO - at 13.6s,\testimator rf's best error=0.0333,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:03] {2458} INFO - iteration 391, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:03] {2620} INFO - at 13.6s,\testimator extra_tree's best error=0.0000,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:03] {2458} INFO - iteration 392, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:04] {2620} INFO - at 13.7s,\testimator extra_tree's best error=0.0000,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:04] {2458} INFO - iteration 393, current learner lgbm\n",
- "[flaml.automl: 02-28 21:55:04] {2620} INFO - at 13.7s,\testimator lgbm's best error=0.0333,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:04] {2458} INFO - iteration 394, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:04] {2620} INFO - at 13.7s,\testimator extra_tree's best error=0.0000,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:04] {2458} INFO - iteration 395, current learner rf\n",
- "[flaml.automl: 02-28 21:55:04] {2620} INFO - at 13.8s,\testimator rf's best error=0.0333,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:04] {2458} INFO - iteration 396, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:04] {2620} INFO - at 13.8s,\testimator extra_tree's best error=0.0000,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:04] {2458} INFO - iteration 397, current learner xgboost\n",
- "[flaml.automl: 02-28 21:55:04] {2620} INFO - at 13.8s,\testimator xgboost's best error=0.0333,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:04] {2458} INFO - iteration 398, current learner lgbm\n",
- "[flaml.automl: 02-28 21:55:04] {2620} INFO - at 13.9s,\testimator lgbm's best error=0.0333,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:04] {2458} INFO - iteration 399, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:04] {2620} INFO - at 13.9s,\testimator extra_tree's best error=0.0000,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:04] {2458} INFO - iteration 400, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:04] {2620} INFO - at 14.0s,\testimator extra_tree's best error=0.0000,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:04] {2458} INFO - iteration 401, current learner xgboost\n",
- "[flaml.automl: 02-28 21:55:04] {2620} INFO - at 14.0s,\testimator xgboost's best error=0.0333,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:04] {2458} INFO - iteration 402, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:04] {2620} INFO - at 14.0s,\testimator extra_tree's best error=0.0000,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:04] {2458} INFO - iteration 403, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:04] {2620} INFO - at 14.1s,\testimator extra_tree's best error=0.0000,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:04] {2458} INFO - iteration 404, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:04] {2620} INFO - at 14.1s,\testimator extra_tree's best error=0.0000,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:04] {2458} INFO - iteration 405, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:55:04] {2620} INFO - at 14.1s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:04] {2458} INFO - iteration 406, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:04] {2620} INFO - at 14.2s,\testimator extra_tree's best error=0.0000,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:04] {2458} INFO - iteration 407, current learner lgbm\n",
- "[flaml.automl: 02-28 21:55:04] {2620} INFO - at 14.2s,\testimator lgbm's best error=0.0333,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:04] {2458} INFO - iteration 408, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:04] {2620} INFO - at 14.2s,\testimator extra_tree's best error=0.0000,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:04] {2458} INFO - iteration 409, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:04] {2620} INFO - at 14.3s,\testimator extra_tree's best error=0.0000,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:04] {2458} INFO - iteration 410, current learner xgb_limitdepth\n",
- "[flaml.automl: 02-28 21:55:04] {2620} INFO - at 14.3s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:04] {2458} INFO - iteration 411, current learner rf\n",
- "[flaml.automl: 02-28 21:55:04] {2620} INFO - at 14.4s,\testimator rf's best error=0.0333,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:04] {2458} INFO - iteration 412, current learner rf\n",
- "[flaml.automl: 02-28 21:55:04] {2620} INFO - at 14.4s,\testimator rf's best error=0.0333,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:04] {2458} INFO - iteration 413, current learner lgbm\n",
- "[flaml.automl: 02-28 21:55:04] {2620} INFO - at 14.4s,\testimator lgbm's best error=0.0333,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:04] {2458} INFO - iteration 414, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:04] {2620} INFO - at 14.5s,\testimator extra_tree's best error=0.0000,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:04] {2458} INFO - iteration 415, current learner lgbm\n",
- "[flaml.automl: 02-28 21:55:04] {2620} INFO - at 14.5s,\testimator lgbm's best error=0.0333,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:04] {2458} INFO - iteration 416, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:04] {2620} INFO - at 14.5s,\testimator extra_tree's best error=0.0000,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:04] {2458} INFO - iteration 417, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:04] {2620} INFO - at 14.6s,\testimator extra_tree's best error=0.0000,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:04] {2458} INFO - iteration 418, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:05] {2620} INFO - at 14.6s,\testimator extra_tree's best error=0.0000,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:05] {2458} INFO - iteration 419, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:05] {2620} INFO - at 14.7s,\testimator extra_tree's best error=0.0000,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:05] {2458} INFO - iteration 420, current learner xgboost\n",
- "[flaml.automl: 02-28 21:55:05] {2620} INFO - at 14.7s,\testimator xgboost's best error=0.0333,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:05] {2458} INFO - iteration 421, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:05] {2620} INFO - at 14.7s,\testimator extra_tree's best error=0.0000,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:05] {2458} INFO - iteration 422, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:05] {2620} INFO - at 14.8s,\testimator extra_tree's best error=0.0000,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:05] {2458} INFO - iteration 423, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:05] {2620} INFO - at 14.8s,\testimator extra_tree's best error=0.0000,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:05] {2458} INFO - iteration 424, current learner extra_tree\n",
- "[flaml.automl: 02-28 21:55:05] {2620} INFO - at 14.9s,\testimator extra_tree's best error=0.0000,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:05] {2458} INFO - iteration 425, current learner lgbm\n",
- "[flaml.automl: 02-28 21:55:05] {2620} INFO - at 14.9s,\testimator lgbm's best error=0.0333,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:05] {2458} INFO - iteration 426, current learner lgbm\n",
- "[flaml.automl: 02-28 21:55:05] {2620} INFO - at 14.9s,\testimator lgbm's best error=0.0333,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:05] {2458} INFO - iteration 427, current learner lgbm\n",
- "[flaml.automl: 02-28 21:55:05] {2620} INFO - at 15.0s,\testimator lgbm's best error=0.0333,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:05] {2458} INFO - iteration 428, current learner lgbm\n",
- "[flaml.automl: 02-28 21:55:05] {2620} INFO - at 15.0s,\testimator lgbm's best error=0.0333,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:05] {2458} INFO - iteration 429, current learner xgboost\n",
- "[flaml.automl: 02-28 21:55:05] {2620} INFO - at 15.0s,\testimator xgboost's best error=0.0333,\tbest estimator extra_tree's best error=0.0000\n",
- "[flaml.automl: 02-28 21:55:05] {2850} INFO - retrain extra_tree for 0.0s\n",
- "[flaml.automl: 02-28 21:55:05] {2857} INFO - retrained model: ExtraTreesClassifier(bootstrap=False, ccp_alpha=0.0, class_weight=None,\n",
- " criterion='gini', max_depth=None, max_features=0.1,\n",
- " max_leaf_nodes=8, max_samples=None,\n",
- " min_impurity_decrease=0.0, min_samples_leaf=1,\n",
- " min_samples_split=2, min_weight_fraction_leaf=0.0,\n",
- " n_estimators=6, n_jobs=-1, oob_score=False,\n",
- " random_state=None, verbose=0, warm_start=False)\n",
- "[flaml.automl: 02-28 21:55:05] {2234} INFO - fit succeeded\n",
- "[flaml.automl: 02-28 21:55:05] {2235} INFO - Time taken to find the best model: 12.538578033447266\n",
- "[flaml.automl: 02-28 21:55:05] {2246} WARNING - Time taken to find the best model is 84% of the provided time budget and not all estimators' hyperparameter search converged. Consider increasing the time budget.\n"
+ "[flaml.automl: 08-03 20:33:26] {2520} INFO - task = ts_forecast_classification\n",
+ "[flaml.automl: 08-03 20:33:26] {2522} INFO - Data split method: time\n",
+ "[flaml.automl: 08-03 20:33:26] {2525} INFO - Evaluation method: holdout\n",
+ "[flaml.automl: 08-03 20:33:26] {2644} INFO - Minimizing error metric: 1-accuracy\n",
+ "[flaml.automl: 08-03 20:33:27] {2786} INFO - List of ML learners in AutoML Run: ['lgbm', 'rf', 'xgboost', 'extra_tree', 'xgb_limitdepth']\n",
+ "[flaml.automl: 08-03 20:33:27] {3088} INFO - iteration 0, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:29] {3221} INFO - Estimated sufficient time budget=11912s. Estimated necessary time budget=12s.\n",
+ "[flaml.automl: 08-03 20:33:29] {3268} INFO - at 2.2s,\testimator lgbm's best error=0.2667,\tbest estimator lgbm's best error=0.2667\n",
+ "[flaml.automl: 08-03 20:33:29] {3088} INFO - iteration 1, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:29] {3268} INFO - at 2.2s,\testimator lgbm's best error=0.2667,\tbest estimator lgbm's best error=0.2667\n",
+ "[flaml.automl: 08-03 20:33:29] {3088} INFO - iteration 2, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:29] {3268} INFO - at 2.2s,\testimator lgbm's best error=0.1333,\tbest estimator lgbm's best error=0.1333\n",
+ "[flaml.automl: 08-03 20:33:29] {3088} INFO - iteration 3, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:29] {3268} INFO - at 2.3s,\testimator lgbm's best error=0.1333,\tbest estimator lgbm's best error=0.1333\n",
+ "[flaml.automl: 08-03 20:33:29] {3088} INFO - iteration 4, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:29] {3268} INFO - at 2.3s,\testimator lgbm's best error=0.0667,\tbest estimator lgbm's best error=0.0667\n",
+ "[flaml.automl: 08-03 20:33:29] {3088} INFO - iteration 5, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:29] {3268} INFO - at 2.4s,\testimator lgbm's best error=0.0667,\tbest estimator lgbm's best error=0.0667\n",
+ "[flaml.automl: 08-03 20:33:29] {3088} INFO - iteration 6, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:29] {3268} INFO - at 2.5s,\testimator lgbm's best error=0.0667,\tbest estimator lgbm's best error=0.0667\n",
+ "[flaml.automl: 08-03 20:33:29] {3088} INFO - iteration 7, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:29] {3268} INFO - at 2.5s,\testimator lgbm's best error=0.0667,\tbest estimator lgbm's best error=0.0667\n",
+ "[flaml.automl: 08-03 20:33:29] {3088} INFO - iteration 8, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:29] {3268} INFO - at 2.5s,\testimator lgbm's best error=0.0667,\tbest estimator lgbm's best error=0.0667\n",
+ "[flaml.automl: 08-03 20:33:29] {3088} INFO - iteration 9, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:29] {3268} INFO - at 2.6s,\testimator lgbm's best error=0.0667,\tbest estimator lgbm's best error=0.0667\n",
+ "[flaml.automl: 08-03 20:33:29] {3088} INFO - iteration 10, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:29] {3268} INFO - at 2.6s,\testimator lgbm's best error=0.0667,\tbest estimator lgbm's best error=0.0667\n",
+ "[flaml.automl: 08-03 20:33:29] {3088} INFO - iteration 11, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:29] {3268} INFO - at 2.7s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:29] {3088} INFO - iteration 12, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:29] {3268} INFO - at 2.7s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:29] {3088} INFO - iteration 13, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:29] {3268} INFO - at 2.8s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:29] {3088} INFO - iteration 14, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:29] {3268} INFO - at 2.8s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:29] {3088} INFO - iteration 15, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:29] {3268} INFO - at 2.8s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:29] {3088} INFO - iteration 16, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:29] {3268} INFO - at 2.9s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:29] {3088} INFO - iteration 17, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:29] {3268} INFO - at 2.9s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:29] {3088} INFO - iteration 18, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:29] {3268} INFO - at 3.0s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:29] {3088} INFO - iteration 19, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:29] {3268} INFO - at 3.0s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:29] {3088} INFO - iteration 20, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:29] {3268} INFO - at 3.0s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:29] {3088} INFO - iteration 21, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:30] {3268} INFO - at 3.1s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:30] {3088} INFO - iteration 22, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:30] {3268} INFO - at 3.1s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:30] {3088} INFO - iteration 23, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:30] {3268} INFO - at 3.2s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:30] {3088} INFO - iteration 24, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:30] {3268} INFO - at 3.2s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:30] {3088} INFO - iteration 25, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:30] {3268} INFO - at 3.3s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:30] {3088} INFO - iteration 26, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:30] {3268} INFO - at 3.3s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:30] {3088} INFO - iteration 27, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:30] {3268} INFO - at 3.4s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:30] {3088} INFO - iteration 28, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:30] {3268} INFO - at 3.4s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:30] {3088} INFO - iteration 29, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:30] {3268} INFO - at 3.4s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:30] {3088} INFO - iteration 30, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:30] {3268} INFO - at 3.5s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:30] {3088} INFO - iteration 31, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:30] {3268} INFO - at 3.5s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:30] {3088} INFO - iteration 32, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:30] {3268} INFO - at 3.5s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:30] {3088} INFO - iteration 33, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:30] {3268} INFO - at 3.6s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:30] {3088} INFO - iteration 34, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:30] {3268} INFO - at 3.6s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:30] {3088} INFO - iteration 35, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:30] {3268} INFO - at 3.7s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:30] {3088} INFO - iteration 36, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:30] {3268} INFO - at 3.7s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:30] {3088} INFO - iteration 37, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:30] {3268} INFO - at 3.7s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:30] {3088} INFO - iteration 38, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:30] {3268} INFO - at 3.8s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:30] {3088} INFO - iteration 39, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:30] {3268} INFO - at 3.8s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:30] {3088} INFO - iteration 40, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:30] {3268} INFO - at 3.9s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:30] {3088} INFO - iteration 41, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:30] {3268} INFO - at 3.9s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:30] {3088} INFO - iteration 42, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:30] {3268} INFO - at 3.9s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:30] {3088} INFO - iteration 43, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:30] {3268} INFO - at 4.0s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:30] {3088} INFO - iteration 44, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:30] {3268} INFO - at 4.0s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:30] {3088} INFO - iteration 45, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:31] {3268} INFO - at 4.1s,\testimator rf's best error=0.1333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:31] {3088} INFO - iteration 46, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:31] {3268} INFO - at 4.1s,\testimator rf's best error=0.0667,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:31] {3088} INFO - iteration 47, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:31] {3268} INFO - at 4.2s,\testimator rf's best error=0.0667,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:31] {3088} INFO - iteration 48, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:31] {3268} INFO - at 4.2s,\testimator rf's best error=0.0667,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:31] {3088} INFO - iteration 49, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:31] {3268} INFO - at 4.2s,\testimator rf's best error=0.0667,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:31] {3088} INFO - iteration 50, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:31] {3268} INFO - at 4.3s,\testimator rf's best error=0.0667,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:31] {3088} INFO - iteration 51, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:31] {3268} INFO - at 4.3s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:31] {3088} INFO - iteration 52, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:31] {3268} INFO - at 4.3s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:31] {3088} INFO - iteration 53, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:31] {3268} INFO - at 4.4s,\testimator rf's best error=0.0667,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:31] {3088} INFO - iteration 54, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:31] {3268} INFO - at 4.5s,\testimator rf's best error=0.0667,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:31] {3088} INFO - iteration 55, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:31] {3268} INFO - at 4.5s,\testimator rf's best error=0.0667,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:31] {3088} INFO - iteration 56, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:31] {3268} INFO - at 4.6s,\testimator rf's best error=0.0667,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:31] {3088} INFO - iteration 57, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:31] {3268} INFO - at 4.6s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:31] {3088} INFO - iteration 58, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:31] {3268} INFO - at 4.6s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:31] {3088} INFO - iteration 59, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:31] {3268} INFO - at 4.7s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:31] {3088} INFO - iteration 60, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:31] {3268} INFO - at 4.7s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:31] {3088} INFO - iteration 61, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:31] {3268} INFO - at 4.8s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:31] {3088} INFO - iteration 62, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:31] {3268} INFO - at 4.9s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:31] {3088} INFO - iteration 63, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:31] {3268} INFO - at 4.9s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:31] {3088} INFO - iteration 64, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:31] {3268} INFO - at 5.0s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:31] {3088} INFO - iteration 65, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:31] {3268} INFO - at 5.0s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:31] {3088} INFO - iteration 66, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:31] {3268} INFO - at 5.0s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:31] {3088} INFO - iteration 67, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:32] {3268} INFO - at 5.1s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:32] {3088} INFO - iteration 68, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:32] {3268} INFO - at 5.2s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:32] {3088} INFO - iteration 69, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:32] {3268} INFO - at 5.2s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:32] {3088} INFO - iteration 70, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:32] {3268} INFO - at 5.2s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:32] {3088} INFO - iteration 71, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:32] {3268} INFO - at 5.3s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:32] {3088} INFO - iteration 72, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:32] {3268} INFO - at 5.3s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:32] {3088} INFO - iteration 73, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:32] {3268} INFO - at 5.4s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:32] {3088} INFO - iteration 74, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:32] {3268} INFO - at 5.4s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:32] {3088} INFO - iteration 75, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:32] {3268} INFO - at 5.5s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:32] {3088} INFO - iteration 76, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:32] {3268} INFO - at 5.5s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:32] {3088} INFO - iteration 77, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:32] {3268} INFO - at 5.5s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:32] {3088} INFO - iteration 78, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:32] {3268} INFO - at 5.6s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:32] {3088} INFO - iteration 79, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:32] {3268} INFO - at 5.7s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:32] {3088} INFO - iteration 80, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:32] {3268} INFO - at 5.7s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:32] {3088} INFO - iteration 81, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:32] {3268} INFO - at 5.8s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:32] {3088} INFO - iteration 82, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:32] {3268} INFO - at 5.8s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:32] {3088} INFO - iteration 83, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:32] {3268} INFO - at 5.9s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:32] {3088} INFO - iteration 84, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:32] {3268} INFO - at 5.9s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:32] {3088} INFO - iteration 85, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:32] {3268} INFO - at 6.0s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:32] {3088} INFO - iteration 86, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:32] {3268} INFO - at 6.0s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:32] {3088} INFO - iteration 87, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:33] {3268} INFO - at 6.1s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:33] {3088} INFO - iteration 88, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:33] {3268} INFO - at 6.1s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:33] {3088} INFO - iteration 89, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:33] {3268} INFO - at 6.2s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:33] {3088} INFO - iteration 90, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:33] {3268} INFO - at 6.2s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:33] {3088} INFO - iteration 91, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:34] {3268} INFO - at 7.8s,\testimator xgboost's best error=0.1333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:34] {3088} INFO - iteration 92, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:34] {3268} INFO - at 7.9s,\testimator extra_tree's best error=0.1333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:34] {3088} INFO - iteration 93, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:34] {3268} INFO - at 7.9s,\testimator extra_tree's best error=0.0667,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:34] {3088} INFO - iteration 94, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:34] {3268} INFO - at 8.0s,\testimator extra_tree's best error=0.0667,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:34] {3088} INFO - iteration 95, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:34] {3268} INFO - at 8.0s,\testimator extra_tree's best error=0.0667,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:35] {3088} INFO - iteration 96, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:35] {3268} INFO - at 8.1s,\testimator extra_tree's best error=0.0667,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:35] {3088} INFO - iteration 97, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:35] {3268} INFO - at 8.1s,\testimator extra_tree's best error=0.0667,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:35] {3088} INFO - iteration 98, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:35] {3268} INFO - at 8.2s,\testimator extra_tree's best error=0.0667,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:35] {3088} INFO - iteration 99, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:35] {3268} INFO - at 8.2s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:35] {3088} INFO - iteration 100, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:35] {3268} INFO - at 8.3s,\testimator extra_tree's best error=0.0667,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:35] {3088} INFO - iteration 101, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:35] {3268} INFO - at 8.3s,\testimator extra_tree's best error=0.0667,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:35] {3088} INFO - iteration 102, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:35] {3268} INFO - at 8.4s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:35] {3088} INFO - iteration 103, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:35] {3268} INFO - at 8.4s,\testimator extra_tree's best error=0.0667,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:35] {3088} INFO - iteration 104, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:35] {3268} INFO - at 8.5s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:35] {3088} INFO - iteration 105, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:35] {3268} INFO - at 8.6s,\testimator extra_tree's best error=0.0667,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:35] {3088} INFO - iteration 106, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:35] {3268} INFO - at 8.6s,\testimator extra_tree's best error=0.0667,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:35] {3088} INFO - iteration 107, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:35] {3268} INFO - at 8.7s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:35] {3088} INFO - iteration 108, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:35] {3268} INFO - at 8.7s,\testimator extra_tree's best error=0.0667,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:35] {3088} INFO - iteration 109, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:35] {3268} INFO - at 8.8s,\testimator extra_tree's best error=0.0667,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:35] {3088} INFO - iteration 110, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:35] {3268} INFO - at 8.8s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:35] {3088} INFO - iteration 111, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:35] {3268} INFO - at 8.9s,\testimator extra_tree's best error=0.0667,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:35] {3088} INFO - iteration 112, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:35] {3268} INFO - at 9.0s,\testimator extra_tree's best error=0.0667,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:35] {3088} INFO - iteration 113, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:35] {3268} INFO - at 9.0s,\testimator extra_tree's best error=0.0667,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:35] {3088} INFO - iteration 114, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:36] {3268} INFO - at 9.1s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:36] {3088} INFO - iteration 115, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:36] {3268} INFO - at 9.1s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:36] {3088} INFO - iteration 116, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:36] {3268} INFO - at 9.2s,\testimator extra_tree's best error=0.0667,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:36] {3088} INFO - iteration 117, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:36] {3268} INFO - at 9.2s,\testimator extra_tree's best error=0.0667,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:36] {3088} INFO - iteration 118, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:36] {3268} INFO - at 9.3s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:36] {3088} INFO - iteration 119, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:36] {3268} INFO - at 9.3s,\testimator extra_tree's best error=0.0667,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:36] {3088} INFO - iteration 120, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:36] {3268} INFO - at 9.4s,\testimator extra_tree's best error=0.0667,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:36] {3088} INFO - iteration 121, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:36] {3268} INFO - at 9.4s,\testimator extra_tree's best error=0.0667,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:36] {3088} INFO - iteration 122, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:36] {3268} INFO - at 9.5s,\testimator extra_tree's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:36] {3088} INFO - iteration 123, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:36] {3268} INFO - at 9.5s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:36] {3088} INFO - iteration 124, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:36] {3268} INFO - at 9.6s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:36] {3088} INFO - iteration 125, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:36] {3268} INFO - at 9.6s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:36] {3088} INFO - iteration 126, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:36] {3268} INFO - at 9.7s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:36] {3088} INFO - iteration 127, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:36] {3268} INFO - at 9.8s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:36] {3088} INFO - iteration 128, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:36] {3268} INFO - at 9.8s,\testimator extra_tree's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:36] {3088} INFO - iteration 129, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:36] {3268} INFO - at 9.9s,\testimator xgboost's best error=0.1333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:36] {3088} INFO - iteration 130, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:36] {3268} INFO - at 9.9s,\testimator xgboost's best error=0.0667,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:36] {3088} INFO - iteration 131, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:36] {3268} INFO - at 10.0s,\testimator xgboost's best error=0.0667,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:36] {3088} INFO - iteration 132, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:36] {3268} INFO - at 10.0s,\testimator xgboost's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:36] {3088} INFO - iteration 133, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:37] {3268} INFO - at 10.1s,\testimator xgboost's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:37] {3088} INFO - iteration 134, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:37] {3268} INFO - at 10.1s,\testimator xgboost's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:37] {3088} INFO - iteration 135, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:37] {3268} INFO - at 10.2s,\testimator extra_tree's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:37] {3088} INFO - iteration 136, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:37] {3268} INFO - at 10.2s,\testimator xgboost's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:37] {3088} INFO - iteration 137, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:37] {3268} INFO - at 10.3s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:37] {3088} INFO - iteration 138, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:37] {3268} INFO - at 10.3s,\testimator xgboost's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:37] {3088} INFO - iteration 139, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:37] {3268} INFO - at 10.4s,\testimator xgboost's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:37] {3088} INFO - iteration 140, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:37] {3268} INFO - at 10.4s,\testimator xgboost's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:37] {3088} INFO - iteration 141, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:37] {3268} INFO - at 10.4s,\testimator xgboost's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:37] {3088} INFO - iteration 142, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:37] {3268} INFO - at 10.5s,\testimator extra_tree's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:37] {3088} INFO - iteration 143, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:37] {3268} INFO - at 10.6s,\testimator xgboost's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:37] {3088} INFO - iteration 144, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:37] {3268} INFO - at 10.6s,\testimator xgboost's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:37] {3088} INFO - iteration 145, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:37] {3268} INFO - at 10.7s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:37] {3088} INFO - iteration 146, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:37] {3268} INFO - at 10.8s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:37] {3088} INFO - iteration 147, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:37] {3268} INFO - at 10.9s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:37] {3088} INFO - iteration 148, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:37] {3268} INFO - at 10.9s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:37] {3088} INFO - iteration 149, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:37] {3268} INFO - at 11.0s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:37] {3088} INFO - iteration 150, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:37] {3268} INFO - at 11.0s,\testimator xgboost's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:37] {3088} INFO - iteration 151, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:38] {3268} INFO - at 11.1s,\testimator xgboost's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:38] {3088} INFO - iteration 152, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:38] {3268} INFO - at 11.1s,\testimator xgboost's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:38] {3088} INFO - iteration 153, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:38] {3268} INFO - at 11.2s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:38] {3088} INFO - iteration 154, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:38] {3268} INFO - at 11.2s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:38] {3088} INFO - iteration 155, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:38] {3268} INFO - at 11.3s,\testimator extra_tree's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:38] {3088} INFO - iteration 156, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:38] {3268} INFO - at 11.4s,\testimator xgboost's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:38] {3088} INFO - iteration 157, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:38] {3268} INFO - at 11.4s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:38] {3088} INFO - iteration 158, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:38] {3268} INFO - at 11.5s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:38] {3088} INFO - iteration 159, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:38] {3268} INFO - at 11.5s,\testimator xgboost's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:38] {3088} INFO - iteration 160, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:38] {3268} INFO - at 11.6s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:38] {3088} INFO - iteration 161, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:38] {3268} INFO - at 11.6s,\testimator xgboost's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:38] {3088} INFO - iteration 162, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:38] {3268} INFO - at 11.7s,\testimator extra_tree's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:38] {3088} INFO - iteration 163, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:38] {3268} INFO - at 11.7s,\testimator extra_tree's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:38] {3088} INFO - iteration 164, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:38] {3268} INFO - at 11.8s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:38] {3088} INFO - iteration 165, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:38] {3268} INFO - at 11.8s,\testimator xgboost's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:38] {3088} INFO - iteration 166, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:38] {3268} INFO - at 11.9s,\testimator xgboost's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:38] {3088} INFO - iteration 167, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:38] {3268} INFO - at 12.0s,\testimator extra_tree's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:38] {3088} INFO - iteration 168, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:39] {3268} INFO - at 12.1s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:39] {3088} INFO - iteration 169, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:39] {3268} INFO - at 12.1s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:39] {3088} INFO - iteration 170, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:39] {3268} INFO - at 12.2s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:39] {3088} INFO - iteration 171, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:39] {3268} INFO - at 12.2s,\testimator xgboost's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:39] {3088} INFO - iteration 172, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:39] {3268} INFO - at 12.2s,\testimator xgboost's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:39] {3088} INFO - iteration 173, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:39] {3268} INFO - at 12.3s,\testimator xgboost's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:39] {3088} INFO - iteration 174, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:39] {3268} INFO - at 12.3s,\testimator xgboost's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:39] {3088} INFO - iteration 175, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:39] {3268} INFO - at 12.4s,\testimator extra_tree's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:39] {3088} INFO - iteration 176, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:39] {3268} INFO - at 12.4s,\testimator xgboost's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:39] {3088} INFO - iteration 177, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:39] {3268} INFO - at 12.5s,\testimator xgboost's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:39] {3088} INFO - iteration 178, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:39] {3268} INFO - at 12.5s,\testimator xgboost's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:39] {3088} INFO - iteration 179, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:39] {3268} INFO - at 12.6s,\testimator extra_tree's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:39] {3088} INFO - iteration 180, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:39] {3268} INFO - at 12.6s,\testimator xgboost's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:39] {3088} INFO - iteration 181, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:39] {3268} INFO - at 12.7s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:39] {3088} INFO - iteration 182, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:39] {3268} INFO - at 12.7s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:39] {3088} INFO - iteration 183, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:39] {3268} INFO - at 12.7s,\testimator xgboost's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:39] {3088} INFO - iteration 184, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:39] {3268} INFO - at 12.8s,\testimator xgboost's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:39] {3088} INFO - iteration 185, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:39] {3268} INFO - at 12.9s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:39] {3088} INFO - iteration 186, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:39] {3268} INFO - at 12.9s,\testimator extra_tree's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:39] {3088} INFO - iteration 187, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:39] {3268} INFO - at 13.0s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:39] {3088} INFO - iteration 188, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:40] {3268} INFO - at 13.1s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:40] {3088} INFO - iteration 189, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:40] {3268} INFO - at 13.1s,\testimator extra_tree's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:40] {3088} INFO - iteration 190, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:40] {3268} INFO - at 13.2s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:40] {3088} INFO - iteration 191, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:40] {3268} INFO - at 13.2s,\testimator xgboost's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:40] {3088} INFO - iteration 192, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:40] {3268} INFO - at 13.3s,\testimator xgboost's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:40] {3088} INFO - iteration 193, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:40] {3268} INFO - at 13.3s,\testimator extra_tree's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:40] {3088} INFO - iteration 194, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:40] {3268} INFO - at 13.4s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:40] {3088} INFO - iteration 195, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:40] {3268} INFO - at 13.4s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:40] {3088} INFO - iteration 196, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:40] {3268} INFO - at 13.5s,\testimator extra_tree's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:40] {3088} INFO - iteration 197, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:40] {3268} INFO - at 13.5s,\testimator extra_tree's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:40] {3088} INFO - iteration 198, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:40] {3268} INFO - at 13.6s,\testimator extra_tree's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:40] {3088} INFO - iteration 199, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:40] {3268} INFO - at 13.6s,\testimator extra_tree's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:40] {3088} INFO - iteration 200, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:40] {3268} INFO - at 13.7s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:40] {3088} INFO - iteration 201, current learner rf\n",
+ "[flaml.automl: 08-03 20:33:40] {3268} INFO - at 13.8s,\testimator rf's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:40] {3088} INFO - iteration 202, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:40] {3268} INFO - at 13.8s,\testimator xgboost's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:40] {3088} INFO - iteration 203, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:40] {3268} INFO - at 13.9s,\testimator extra_tree's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:40] {3088} INFO - iteration 204, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:40] {3268} INFO - at 13.9s,\testimator extra_tree's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:40] {3088} INFO - iteration 205, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:40] {3268} INFO - at 14.0s,\testimator xgboost's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:40] {3088} INFO - iteration 206, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:41] {3268} INFO - at 14.1s,\testimator extra_tree's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:41] {3088} INFO - iteration 207, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:41] {3268} INFO - at 14.1s,\testimator extra_tree's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:41] {3088} INFO - iteration 208, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:41] {3268} INFO - at 14.1s,\testimator xgboost's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:41] {3088} INFO - iteration 209, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:41] {3268} INFO - at 14.2s,\testimator xgboost's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:41] {3088} INFO - iteration 210, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:41] {3268} INFO - at 14.2s,\testimator xgboost's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:41] {3088} INFO - iteration 211, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:41] {3268} INFO - at 14.3s,\testimator xgboost's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:41] {3088} INFO - iteration 212, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:41] {3268} INFO - at 14.3s,\testimator extra_tree's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:41] {3088} INFO - iteration 213, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:41] {3268} INFO - at 14.4s,\testimator extra_tree's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:41] {3088} INFO - iteration 214, current learner xgb_limitdepth\n",
+ "[flaml.automl: 08-03 20:33:41] {3268} INFO - at 14.4s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:41] {3088} INFO - iteration 215, current learner xgb_limitdepth\n",
+ "[flaml.automl: 08-03 20:33:41] {3268} INFO - at 14.5s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:41] {3088} INFO - iteration 216, current learner xgb_limitdepth\n",
+ "[flaml.automl: 08-03 20:33:41] {3268} INFO - at 14.5s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:41] {3088} INFO - iteration 217, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:41] {3268} INFO - at 14.6s,\testimator extra_tree's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:41] {3088} INFO - iteration 218, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:41] {3268} INFO - at 14.6s,\testimator xgboost's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:41] {3088} INFO - iteration 219, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:41] {3268} INFO - at 14.7s,\testimator extra_tree's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:41] {3088} INFO - iteration 220, current learner xgb_limitdepth\n",
+ "[flaml.automl: 08-03 20:33:41] {3268} INFO - at 14.7s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:41] {3088} INFO - iteration 221, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:41] {3268} INFO - at 14.7s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:41] {3088} INFO - iteration 222, current learner xgb_limitdepth\n",
+ "[flaml.automl: 08-03 20:33:41] {3268} INFO - at 14.8s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:41] {3088} INFO - iteration 223, current learner xgb_limitdepth\n",
+ "[flaml.automl: 08-03 20:33:41] {3268} INFO - at 14.8s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:41] {3088} INFO - iteration 224, current learner lgbm\n",
+ "[flaml.automl: 08-03 20:33:41] {3268} INFO - at 14.9s,\testimator lgbm's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:41] {3088} INFO - iteration 225, current learner xgboost\n",
+ "[flaml.automl: 08-03 20:33:41] {3268} INFO - at 14.9s,\testimator xgboost's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:41] {3088} INFO - iteration 226, current learner xgb_limitdepth\n",
+ "[flaml.automl: 08-03 20:33:41] {3268} INFO - at 14.9s,\testimator xgb_limitdepth's best error=0.0667,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:41] {3088} INFO - iteration 227, current learner extra_tree\n",
+ "[flaml.automl: 08-03 20:33:41] {3268} INFO - at 15.0s,\testimator extra_tree's best error=0.0333,\tbest estimator lgbm's best error=0.0333\n",
+ "[flaml.automl: 08-03 20:33:41] {3532} INFO - retrain lgbm for 0.0s\n",
+ "[flaml.automl: 08-03 20:33:41] {3539} INFO - retrained model: LGBMClassifier(boosting_type='gbdt', class_weight=None, colsample_bytree=1.0,\n",
+ " importance_type='split', learning_rate=0.7333523408279569,\n",
+ " max_bin=31, max_depth=-1, min_child_samples=8,\n",
+ " min_child_weight=0.001, min_split_gain=0.0, n_estimators=4,\n",
+ " n_jobs=-1, num_leaves=5, objective=None, random_state=None,\n",
+ " reg_alpha=0.0009765625, reg_lambda=7.593190995489472,\n",
+ " silent=True, subsample=1.0, subsample_for_bin=200000,\n",
+ " subsample_freq=0, verbose=-1)\n",
+ "[flaml.automl: 08-03 20:33:41] {2817} INFO - fit succeeded\n",
+ "[flaml.automl: 08-03 20:33:41] {2818} INFO - Time taken to find the best model: 2.6732513904571533\n"
]
}
],
@@ -2947,24 +2538,25 @@
},
{
"cell_type": "code",
- "execution_count": 54,
+ "execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
- "Best ML leaner: extra_tree\n",
- "Best hyperparmeter config: {'n_estimators': 6, 'max_leaves': 8, 'optimize_for_horizon': False, 'max_features': 0.1, 'lags': 8}\n",
- "Best mape on validation data: 0.0\n",
- "Training duration of best run: 0.022936344146728516s\n",
- "ExtraTreesClassifier(bootstrap=False, ccp_alpha=0.0, class_weight=None,\n",
- " criterion='gini', max_depth=None, max_features=0.1,\n",
- " max_leaf_nodes=8, max_samples=None,\n",
- " min_impurity_decrease=0.0, min_samples_leaf=1,\n",
- " min_samples_split=2, min_weight_fraction_leaf=0.0,\n",
- " n_estimators=6, n_jobs=-1, oob_score=False,\n",
- " random_state=None, verbose=0, warm_start=False)\n"
+ "Best ML leaner: lgbm\n",
+ "Best hyperparmeter config: {'n_estimators': 4, 'num_leaves': 5, 'min_child_samples': 8, 'learning_rate': 0.7333523408279569, 'log_max_bin': 5, 'colsample_bytree': 1.0, 'reg_alpha': 0.0009765625, 'reg_lambda': 7.593190995489472, 'optimize_for_horizon': False, 'lags': 5}\n",
+ "Best mape on validation data: 0.033333333333333326\n",
+ "Training duration of best run: 0.017951011657714844s\n",
+ "LGBMClassifier(boosting_type='gbdt', class_weight=None, colsample_bytree=1.0,\n",
+ " importance_type='split', learning_rate=0.7333523408279569,\n",
+ " max_bin=31, max_depth=-1, min_child_samples=8,\n",
+ " min_child_weight=0.001, min_split_gain=0.0, n_estimators=4,\n",
+ " n_jobs=-1, num_leaves=5, objective=None, random_state=None,\n",
+ " reg_alpha=0.0009765625, reg_lambda=7.593190995489472,\n",
+ " silent=True, subsample=1.0, subsample_for_bin=200000,\n",
+ " subsample_freq=0, verbose=-1)\n"
]
}
],
@@ -2979,7 +2571,7 @@
},
{
"cell_type": "code",
- "execution_count": 55,
+ "execution_count": 9,
"metadata": {},
"outputs": [
{
@@ -3030,7 +2622,7 @@
},
{
"cell_type": "code",
- "execution_count": 56,
+ "execution_count": 10,
"metadata": {},
"outputs": [
{
@@ -3050,7 +2642,1424 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "## 5. Comparison with Alternatives (CO2 Dataset)"
+ "## 5. Forecast Problems with Panel Datasets (Multiple Time Series)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Load data and preprocess\n",
+ "\n",
+ "Import Stallion & Co.'s beverage sales data from pytorch-forecasting, orginally from Kaggle. The dataset contains about 21,000 monthly historic sales record as well as additional information about the sales price, the location of the agency, special days such as holidays, and volume sold in the entire industry. There are thousands of unique wholesaler-SKU/products combinations, each representing an individual time series. The task is to provide a six month forecast of demand at SKU level for each wholesaler."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def get_stalliion_data():\n",
+ " from pytorch_forecasting.data.examples import get_stallion_data\n",
+ "\n",
+ " data = get_stallion_data()\n",
+ " # add time index\n",
+ " data[\"time_idx\"] = data[\"date\"].dt.year * 12 + data[\"date\"].dt.month\n",
+ " data[\"time_idx\"] -= data[\"time_idx\"].min()\n",
+ " # add additional features\n",
+ " data[\"month\"] = data.date.dt.month.astype(str).astype(\n",
+ " \"category\"\n",
+ " ) # categories have be strings\n",
+ " data[\"log_volume\"] = np.log(data.volume + 1e-8)\n",
+ " data[\"avg_volume_by_sku\"] = data.groupby(\n",
+ " [\"time_idx\", \"sku\"], observed=True\n",
+ " ).volume.transform(\"mean\")\n",
+ " data[\"avg_volume_by_agency\"] = data.groupby(\n",
+ " [\"time_idx\", \"agency\"], observed=True\n",
+ " ).volume.transform(\"mean\")\n",
+ " # we want to encode special days as one variable and thus need to first reverse one-hot encoding\n",
+ " special_days = [\n",
+ " \"easter_day\",\n",
+ " \"good_friday\",\n",
+ " \"new_year\",\n",
+ " \"christmas\",\n",
+ " \"labor_day\",\n",
+ " \"independence_day\",\n",
+ " \"revolution_day_memorial\",\n",
+ " \"regional_games\",\n",
+ " \"beer_capital\",\n",
+ " \"music_fest\",\n",
+ " ]\n",
+ " data[special_days] = (\n",
+ " data[special_days]\n",
+ " .apply(lambda x: x.map({0: \"-\", 1: x.name}))\n",
+ " .astype(\"category\")\n",
+ " )\n",
+ " return data, special_days"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "data, special_days = get_stalliion_data()\n",
+ "time_horizon = 6 # predict six months\n",
+ "# make time steps first column\n",
+ "data[\"time_idx\"] = data[\"date\"].dt.year * 12 + data[\"date\"].dt.month\n",
+ "data[\"time_idx\"] -= data[\"time_idx\"].min()\n",
+ "training_cutoff = data[\"time_idx\"].max() - time_horizon\n",
+ "ts_col = data.pop(\"date\")\n",
+ "data.insert(0, \"date\", ts_col)\n",
+ "# FLAML assumes input is not sorted, but we sort here for comparison purposes with y_test\n",
+ "data = data.sort_values([\"agency\", \"sku\", \"date\"])\n",
+ "X_train = data[lambda x: x.time_idx <= training_cutoff]\n",
+ "X_test = data[lambda x: x.time_idx > training_cutoff]\n",
+ "y_train = X_train.pop(\"volume\")\n",
+ "y_test = X_test.pop(\"volume\")"
+ ]
+ },
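+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As a quick, optional sanity check, plain pandas can confirm the panel structure of the prepared data: how many individual (agency, sku) time series there are and that each series contributes `time_horizon` rows to the held-out test set. This sketch uses only the variables defined above and no FLAML API."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# optional sanity check of the panel split (pandas only)\n",
+    "n_series = X_train.groupby([\"agency\", \"sku\"], observed=True).ngroups\n",
+    "print(f\"number of individual time series: {n_series}\")\n",
+    "# each series should contribute time_horizon rows to the test set\n",
+    "test_lengths = X_test.groupby([\"agency\", \"sku\"], observed=True).size()\n",
+    "print(f\"test rows per series: min={test_lengths.min()}, max={test_lengths.max()}\")\n",
+    "print(f\"training cutoff (time_idx): {training_cutoff}, horizon: {time_horizon} months\")"
+   ]
+  },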
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "