Merge branch 'main' into cv_strategy

update
skzhang1
2022-08-21 12:33:26 +00:00
17 changed files with 626 additions and 475 deletions

View File

@@ -51,6 +51,10 @@ jobs:
if: (matrix.os == 'macOS-latest' || matrix.os == 'ubuntu-latest') && matrix.python-version != '3.9' && matrix.python-version != '3.10'
run: |
pip install -e .[forecast]
- name: Install vw on python < 3.10
if: matrix.python-version != '3.10'
run: |
pip install -e .[vw]
- name: Lint with flake8
run: |
# stop the build if there are Python syntax errors or undefined names

View File

@@ -628,6 +628,8 @@ class AutoML(BaseEstimator):
keep_search_state: boolean, default=False | Whether to keep data needed
for model search after fit(). By default the state is deleted for
space saving.
preserve_checkpoint: boolean, default=True | Whether to preserve the saved checkpoint
on disk when deleting automl. By default the checkpoint is preserved.
early_stop: boolean, default=False | Whether to stop early if the
search is considered to converge.
append_log: boolean, default=False | Whether to directly append the log
@@ -727,6 +729,7 @@ class AutoML(BaseEstimator):
settings["starting_points"] = settings.get("starting_points", "static")
settings["n_concurrent_trials"] = settings.get("n_concurrent_trials", 1)
settings["keep_search_state"] = settings.get("keep_search_state", False)
settings["preserve_checkpoint"] = settings.get("preserve_checkpoint", True)
settings["early_stop"] = settings.get("early_stop", False)
settings["append_log"] = settings.get("append_log", False)
settings["min_sample_size"] = settings.get("min_sample_size", MIN_SAMPLE_TRAIN)
@@ -1578,6 +1581,7 @@ class AutoML(BaseEstimator):
auto_augment=None,
custom_hp=None,
skip_transform=None,
preserve_checkpoint=True,
fit_kwargs_by_estimator=None,
**fit_kwargs,
):
@@ -1714,6 +1718,11 @@ class AutoML(BaseEstimator):
self._state.fit_kwargs_by_estimator = (
fit_kwargs_by_estimator or self._settings.get("fit_kwargs_by_estimator")
)
self.preserve_checkpoint = (
self._settings.get("preserve_checkpoint")
if preserve_checkpoint is None
else preserve_checkpoint
)
self._validate_data(X_train, y_train, dataframe, label, groups=groups)
logger.info("log file name {}".format(log_file_name))
@@ -2129,6 +2138,7 @@ class AutoML(BaseEstimator):
seed=None,
n_concurrent_trials=None,
keep_search_state=None,
preserve_checkpoint=True,
early_stop=None,
append_log=None,
auto_augment=None,
@@ -2310,6 +2320,8 @@ class AutoML(BaseEstimator):
keep_search_state: boolean, default=False | Whether to keep data needed
for model search after fit(). By default the state is deleted for
space saving.
preserve_checkpoint: boolean, default=True | Whether to preserve the saved checkpoint
on disk when deleting automl. By default the checkpoint is preserved.
early_stop: boolean, default=False | Whether to stop early if the
search is considered to converge.
append_log: boolean, default=False | Whether to directly append the log
@@ -2503,6 +2515,11 @@ class AutoML(BaseEstimator):
if keep_search_state is None
else keep_search_state
)
self.preserve_checkpoint = (
self._settings.get("preserve_checkpoint")
if preserve_checkpoint is None
else preserve_checkpoint
)
early_stop = (
self._settings.get("early_stop") if early_stop is None else early_stop
)
@@ -3612,7 +3629,8 @@ class AutoML(BaseEstimator):
and self._trained_estimator
and hasattr(self._trained_estimator, "cleanup")
):
self._trained_estimator.cleanup()
if self.preserve_checkpoint is False:
self._trained_estimator.cleanup()
del self._trained_estimator
def _select_estimator(self, estimator_list):
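
For context, here is a minimal usage sketch of the new `preserve_checkpoint` flag; the data variables and task below are assumptions for illustration, not part of this diff:

```python
from flaml import AutoML

automl = AutoML()
# preserve_checkpoint=True (the default) keeps any on-disk checkpoint produced by the
# trained estimator when the automl object is deleted; pass False to clean it up.
automl.fit(
    X_train=X_train,  # assumed to be defined by the caller
    y_train=y_train,
    task="classification",
    time_budget=60,
    preserve_checkpoint=False,
)
del automl  # with preserve_checkpoint=False, the trained estimator's cleanup() is called
```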

View File

@@ -1626,15 +1626,26 @@ class CatBoostEstimator(BaseEstimator):
cat_features = list(X_train.select_dtypes(include="category").columns)
else:
cat_features = []
n = max(int(len(y_train) * 0.9), len(y_train) - 1000)
use_best_model = kwargs.get("use_best_model", True)
n = (
max(int(len(y_train) * 0.9), len(y_train) - 1000)
if use_best_model
else len(y_train)
)
X_tr, y_tr = X_train[:n], y_train[:n]
from catboost import Pool, __version__
eval_set = (
Pool(data=X_train[n:], label=y_train[n:], cat_features=cat_features)
if use_best_model
else None
)
if "sample_weight" in kwargs:
weight = kwargs["sample_weight"]
if weight is not None:
kwargs["sample_weight"] = weight[:n]
else:
weight = None
from catboost import Pool, __version__
model = self.estimator_class(train_dir=train_dir, **self.params)
if __version__ >= "0.26":
@@ -1642,10 +1653,10 @@ class CatBoostEstimator(BaseEstimator):
X_tr,
y_tr,
cat_features=cat_features,
eval_set=Pool(
data=X_train[n:], label=y_train[n:], cat_features=cat_features
eval_set=eval_set,
callbacks=CatBoostEstimator._callbacks(
start_time, deadline, FREE_MEM_RATIO if use_best_model else None
),
callbacks=CatBoostEstimator._callbacks(start_time, deadline),
**kwargs,
)
else:
@@ -1653,9 +1664,7 @@ class CatBoostEstimator(BaseEstimator):
X_tr,
y_tr,
cat_features=cat_features,
eval_set=Pool(
data=X_train[n:], label=y_train[n:], cat_features=cat_features
),
eval_set=eval_set,
**kwargs,
)
shutil.rmtree(train_dir, ignore_errors=True)
@@ -1667,7 +1676,7 @@ class CatBoostEstimator(BaseEstimator):
return train_time
@classmethod
def _callbacks(cls, start_time, deadline):
def _callbacks(cls, start_time, deadline, free_mem_ratio):
class ResourceLimit:
def after_iteration(self, info) -> bool:
now = time.time()
@@ -1675,9 +1684,9 @@ class CatBoostEstimator(BaseEstimator):
self._time_per_iter = now - start_time
if now + self._time_per_iter > deadline:
return False
if psutil is not None:
if psutil is not None and free_mem_ratio is not None:
mem = psutil.virtual_memory()
if mem.available / mem.total < FREE_MEM_RATIO:
if mem.available / mem.total < free_mem_ratio:
return False
return True # can continue
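
For illustration, a hedged sketch of how `use_best_model` reaches `CatBoostEstimator.fit` through `**fit_kwargs`, mirroring the updated regression test later in this diff; the data variables are assumptions:

```python
from flaml import AutoML

automl = AutoML()
# use_best_model=False is forwarded to CatBoost via **fit_kwargs: no hold-out eval_set
# is split off, all rows are used for training, and the free-memory callback is skipped.
automl.fit(
    X_train=X_train,  # assumed to be defined by the caller
    y_train=y_train,
    task="regression",
    estimator_list=["catboost"],
    max_iter=2,
    use_best_model=False,
)
```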

View File

@@ -6,6 +6,7 @@ from typing import Optional, Union, List, Callable, Tuple
import numpy as np
import datetime
import time
import os
try:
from ray import __version__ as ray_version
@@ -147,6 +148,7 @@ def run(
max_failure: Optional[int] = 100,
use_ray: Optional[bool] = False,
use_incumbent_result_in_evaluation: Optional[bool] = None,
log_file_name: Optional[str] = None,
**ray_args,
):
"""The trigger for HPO.
@@ -298,6 +300,11 @@ def run(
max_failure: int | the maximal consecutive number of failures to sample
a trial before the tuning is terminated.
use_ray: A boolean of whether to use ray as the backend.
log_file_name: A string of the log file name. Defaults to None.
When set to None:
if local_dir is not given, no log file is created;
if local_dir is given, the log file name will be autogenerated under local_dir.
Only valid when verbose > 0 or use_ray is True.
**ray_args: keyword arguments to pass to ray.tune.run().
Only valid when use_ray=True.
"""
@@ -309,11 +316,19 @@ def run(
old_verbose = _verbose
old_running_trial = _running_trial
old_training_iteration = _training_iteration
if local_dir and not log_file_name and verbose > 0:
os.makedirs(local_dir, exist_ok=True)
log_file_name = os.path.join(
local_dir, "tune_" + str(datetime.datetime.now()).replace(":", "-") + ".log"
)
if not use_ray:
_verbose = verbose
old_handlers = logger.handlers
old_level = logger.getEffectiveLevel()
logger.handlers = []
global _runner
old_runner = _runner
assert not ray_args, "ray_args is only valid when use_ray=True"
if (
old_handlers
and isinstance(old_handlers[0], logging.StreamHandler)
@@ -322,18 +337,8 @@ def run(
# Add the console handler.
logger.addHandler(old_handlers[0])
if verbose > 0:
if local_dir:
import os
os.makedirs(local_dir, exist_ok=True)
logger.addHandler(
logging.FileHandler(
local_dir
+ "/tune_"
+ str(datetime.datetime.now()).replace(":", "-")
+ ".log"
)
)
if log_file_name:
logger.addHandler(logging.FileHandler(log_file_name))
elif not logger.hasHandlers():
# Add the console handler.
_ch = logging.StreamHandler()
@@ -466,6 +471,10 @@ def run(
resources_per_trial=resources_per_trial,
**ray_args,
)
if log_file_name:
with open(log_file_name, "w") as f:
for trial in analysis.trials:
f.write(f"result: {trial.last_result}\n")
return analysis
finally:
_use_ray = old_use_ray
@@ -480,8 +489,6 @@ def run(
scheduler.set_search_properties(metric=metric, mode=mode)
from .trial_runner import SequentialTrialRunner
global _runner
old_runner = _runner
try:
_runner = SequentialTrialRunner(
search_alg=search_alg,
@@ -530,7 +537,7 @@ def run(
_verbose = old_verbose
_running_trial = old_running_trial
_training_iteration = old_training_iteration
_runner = old_runner
if not use_ray:
_runner = old_runner
logger.handlers = old_handlers
logger.setLevel(old_level)
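
A small self-contained sketch of the new `log_file_name` argument; the toy objective is an assumption for illustration:

```python
from flaml import tune

def toy_objective(config):
    # toy objective assumed for illustration
    return {"loss": (config["x"] - 1) ** 2}

analysis = tune.run(
    toy_objective,
    config={"x": tune.uniform(0, 2)},
    metric="loss",
    mode="min",
    num_samples=10,
    local_dir="logs",
    # explicit log file; if omitted while local_dir is set, a name is autogenerated under local_dir
    log_file_name="logs/toy_tune.log",
)
print(analysis.best_config)
```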

View File

@@ -1 +1 @@
__version__ = "1.0.10"
__version__ = "1.0.11"

View File

@@ -39,7 +39,7 @@
"metadata": {},
"outputs": [],
"source": [
"%pip install flaml[notebook]==1.0.8"
"%pip install flaml[notebook]==1.0.10"
]
},
{
@@ -651,6 +651,7 @@
"metadata": {},
"outputs": [],
"source": [
"# uncomment the following line if optuna is not installed\n",
"# %pip install optuna==2.8.0"
]
},

File diff suppressed because it is too large

View File

@@ -54,7 +54,6 @@ setuptools.setup(
"catboost>=0.26",
"rgf-python",
"optuna==2.8.0",
"vowpalwabbit",
"openml",
"statsmodels>=0.12.2",
"psutil==5.8.0",
@@ -79,7 +78,7 @@ setuptools.setup(
"nni",
],
"vw": [
"vowpalwabbit",
"vowpalwabbit>=8.10.0, <9.0.0",
],
"nlp": [
"transformers[torch]==4.18",

View File

@@ -98,8 +98,8 @@ class TestRegression(unittest.TestCase):
y_train = np.random.uniform(size=300)
X_val = scipy.sparse.random(100, 900, density=0.0001)
y_val = np.random.uniform(size=100)
automl_experiment = AutoML()
automl_settings = {
automl = AutoML()
settings = {
"time_budget": 2,
"metric": "mae",
"task": "regression",
@@ -110,23 +110,34 @@ class TestRegression(unittest.TestCase):
"verbose": 0,
"early_stop": True,
}
automl_experiment.fit(
X_train=X_train,
y_train=y_train,
X_val=X_val,
y_val=y_val,
**automl_settings
automl.fit(
X_train=X_train, y_train=y_train, X_val=X_val, y_val=y_val, **settings
)
assert automl._state.X_val.shape == X_val.shape
print(automl.predict(X_train))
print(automl.model)
print(automl.config_history)
print(automl.best_model_for_estimator("rf"))
print(automl.best_iteration)
print(automl.best_estimator)
print(automl.best_config)
print(automl.best_loss)
print(automl.best_config_train_time)
settings.update(
{
"estimator_list": ["catboost"],
"keep_search_state": False,
"model_history": False,
"use_best_model": False,
"time_budget": None,
"max_iter": 2,
"custom_hp": {"catboost": {"n_estimators": {"domain": 100}}},
}
)
automl.fit(
X_train=X_train, y_train=y_train, X_val=X_val, y_val=y_val, **settings
)
assert automl_experiment._state.X_val.shape == X_val.shape
print(automl_experiment.predict(X_train))
print(automl_experiment.model)
print(automl_experiment.config_history)
print(automl_experiment.best_model_for_estimator("rf"))
print(automl_experiment.best_iteration)
print(automl_experiment.best_estimator)
print(automl_experiment.best_config)
print(automl_experiment.best_loss)
print(automl_experiment.best_config_train_time)
def test_parallel(self, hpo_method=None):
automl_experiment = AutoML()

View File

@@ -13,6 +13,7 @@ def test_hf_data():
automl = AutoML()
automl_settings = get_automl_settings()
automl_settings["preserve_checkpoint"] = False
try:
automl.fit(
@@ -68,6 +69,8 @@ def test_hf_data():
automl.predict_proba(X_test)
print(automl.classes_)
del automl
if __name__ == "__main__":
test_hf_data()

View File

@@ -1,18 +1,17 @@
import unittest
import numpy as np
import scipy.sparse
import pandas as pd
from sklearn.metrics import mean_squared_error, mean_absolute_error
import logging
from flaml.tune import loguniform, polynomial_expansion_set
from vowpalwabbit import pyvw
from flaml import AutoVW
import string
import os
import openml
from requests.exceptions import SSLError
import sys
import pytest
VW_DS_DIR = "test/data/"
NS_LIST = list(string.ascii_lowercase) + list(string.ascii_uppercase)
@@ -369,8 +368,14 @@ def get_vw_tuning_problem(tuning_hp="NamesapceInteraction"):
return vw_oml_problem_args, vw_online_aml_problem
@pytest.mark.skipif(
"3.10" in sys.version,
reason="do not run on py 3.10",
)
class TestAutoVW(unittest.TestCase):
def test_vw_oml_problem_and_vanilla_vw(self):
from vowpalwabbit import pyvw
vw_oml_problem_args, vw_online_aml_problem = get_vw_tuning_problem()
vanilla_vw = pyvw.vw(**vw_oml_problem_args["fixed_hp_config"])
cumulative_loss_list = online_learning_loop(

View File

@@ -22,6 +22,7 @@ def test_config_constraint():
metric="metric",
mode="max",
num_samples=100,
log_file_name="logs/config_constraint.log",
)
assert analysis.best_config["x"] > analysis.best_config["y"]

View File

@@ -295,7 +295,7 @@ def test_searcher():
print(searcher.suggest("t1"))
from flaml import tune
tune.run(lambda x: 1, config={}, use_ray=use_ray)
tune.run(lambda x: 1, config={}, use_ray=use_ray, log_file_name="logs/searcher.log")
def test_no_optuna():

View File

@@ -47,6 +47,8 @@ def test_nested_run():
mode="min",
num_samples=5,
local_dir="logs",
log_file_name="logs/nested.log",
verbose=3,
)
print(analysis.best_result)

View File

@@ -1,8 +1,15 @@
# Frequently Asked Questions
### [Guidelines on how to set a hyperparameter search space](Use-Cases/Tune-User-Defined-Function#details-and-guidelines-on-hyperparameter-search-space)
### [Guidelines on parallel vs sequential tuning](Use-Cases/Task-Oriented-AutoML#guidelines-on-parallel-vs-sequential-tuning)
### [Guidelines on creating and tuning a custom estimator](Use-Cases/Task-Oriented-AutoML#guidelines-on-tuning-a-custom-estimator)
### About `low_cost_partial_config` in `tune`.
- Definition and purpose: The `low_cost_partial_config` is a dictionary of subset of the hyperparameter coordinates whose value corresponds to a configuration with known low-cost (i.e., low computation cost for training the corresponding model). The concept of low/high-cost is meaningful in the case where a subset of the hyperparameters to tune directly affects the computation cost for training the model. For example, `n_estimators` and `max_leaves` are known to affect the training cost of tree-based learners. We call this subset of hyperparameters, *cost-related hyperparameters*. In such scenarios, if you are aware of low-cost configurations for the cost-related hyperparameters, you are recommended to set them as the `low_cost_partial_config`. Using the tree-based method example again, since we know that small `n_estimators` and `max_leaves` generally correspond to simpler models and thus lower cost, we set `{'n_estimators': 4, 'max_leaves': 4}` as the `low_cost_partial_config` by default (note that `4` is the lower bound of search space for these two hyperparameters), e.g., in [LGBM](https://github.com/microsoft/FLAML/blob/main/flaml/model.py#L215). Configuring `low_cost_partial_config` helps the search algorithms make more cost-efficient choices.
In AutoML, the `low_cost_init_value` in `search_space()` function for each estimator serves the same role.
- Usage in practice: It is recommended to configure it if there are cost-related hyperparameters in your tuning task and you happen to know the low-cost values for them, but it is not required (it is fine to leave it at its default value, i.e., `None`).
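
To make the guidance above concrete, a hedged sketch of passing `low_cost_partial_config` to `flaml.tune.run`; the evaluation function and the helper `train_and_score` are assumptions, not FLAML APIs:

```python
from flaml import tune

def evaluate_config(config):
    # placeholder for a user-defined training + validation routine
    val_loss = train_and_score(config)  # assumed helper, not part of FLAML
    return {"val_loss": val_loss}

analysis = tune.run(
    evaluate_config,
    config={
        "n_estimators": tune.lograndint(4, 1000),
        "max_leaves": tune.lograndint(4, 1000),
    },
    # known low-cost values for the cost-related hyperparameters
    low_cost_partial_config={"n_estimators": 4, "max_leaves": 4},
    metric="val_loss",
    mode="min",
    num_samples=50,
)
```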

View File

@@ -125,8 +125,9 @@ The estimator list can contain one or more estimator names, each corresponding t
- tuning an estimator that is not built-in;
- customizing search space for a built-in estimator.
To tune a custom estimator that is not built-in, you need to:
#### Guidelines on tuning a custom estimator
To tune a custom estimator that is not built-in, you need to:
1. Build a custom estimator by inheriting [`flaml.model.BaseEstimator`](../reference/model#baseestimator-objects) or a derived class.
For example, if you have an estimator class with scikit-learn style `fit()` and `predict()` functions, you only need to set `self.estimator_class` to be that class in your constructor.
@@ -280,7 +281,9 @@ Some constraints on the estimator can be implemented via the custom learner. For
class MonotonicXGBoostEstimator(XGBoostSklearnEstimator):
@classmethod
def search_space(**args):
return super().search_space(**args).update({"monotone_constraints": "(1, -1)"})
space = super().search_space(**args)
space.update({"monotone_constraints": {"domain": "(1, -1)"}})
return space
```
It adds a monotonicity constraint to XGBoost. This approach can be used to set any constraint that is an argument in the underlying estimator's constructor.
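
As a follow-up usage sketch, the custom estimator can be registered with `AutoML.add_learner` and then searched; the data variables are assumptions for illustration:

```python
from flaml import AutoML
from flaml.model import XGBoostSklearnEstimator

class MonotonicXGBoostEstimator(XGBoostSklearnEstimator):
    @classmethod
    def search_space(cls, **args):
        space = super().search_space(**args)
        space.update({"monotone_constraints": {"domain": "(1, -1)"}})
        return space

automl = AutoML()
automl.add_learner(learner_name="monotonic_xgboost", learner_class=MonotonicXGBoostEstimator)
# restrict the search to the custom learner (X_train / y_train assumed to exist)
automl.fit(X_train=X_train, y_train=y_train, task="regression",
           estimator_list=["monotonic_xgboost"], time_budget=60)
```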

View File

@@ -265,24 +265,27 @@ A user can specify constraints on the configurations to be satisfied via the arg
In the following code example, we constrain the output of `area`, which takes a configuration as input and outputs a numerical value, to be no larger than 1000.
```python
def area(config):
return config["width"] * config["height"]
def my_model_size(config):
return config["n_estimators"] * config["max_leaves"]
flaml.tune.run(evaluation_function=evaluate_config, mode="min",
config=config_search_space,
config_constraints=[(area, "<=", 1000)], ...)
analysis = tune.run(...,
config_constraints = [(my_model_size, "<=", 40)],
)
```
You can also specify a list of metric constraints to be satisfied via the argument `metric_constraints`. Each element in the `metric_constraints` list is a tuple that consists of (1) a string specifying the name of the metric (the metric name must be defined and returned in the user-defined `evaluation_function`); (2) an operation chosen from "<=" or ">="; (3) a numerical threshold.
In the following code example, we constrain the metric `score` to be no larger than 0.4.
In the following code example, we constrain the metric `training_cost` to be no larger than 1 second.
```python
flaml.tune.run(evaluation_function=evaluate_config, mode="min",
config=config_search_space,
metric_constraints=[("score", "<=", 0.4)],...)
analysis = tune.run(...,
metric_constraints = [("training_cost", "<=", 1)],
)
```
#### **`config_constraints` vs `metric_constraints`:**
The key difference between these two types of constraints is that the calculation of constraints in `config_constraints` does not rely on the computation procedure in the evaluation function, i.e., in `evaluation_function`; for example, a constraint may depend only on the config itself, as in the code example above. Due to this independence, constraints in `config_constraints` are checked before evaluation, so configurations that do not satisfy `config_constraints` will not be evaluated.
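
To make the distinction concrete, a self-contained sketch with an assumed toy objective that combines both constraint types:

```python
from flaml import tune

def evaluate_config(config):
    # toy objective; "training_cost" stands in for a measured cost metric
    score = 1.0 / (config["n_estimators"] + config["max_leaves"])
    training_cost = config["n_estimators"] * config["max_leaves"] * 1e-2
    return {"score": score, "training_cost": training_cost}

def my_model_size(config):
    # depends only on the config, so it can be checked before evaluation
    return config["n_estimators"] * config["max_leaves"]

analysis = tune.run(
    evaluate_config,
    config={
        "n_estimators": tune.randint(4, 10),
        "max_leaves": tune.randint(4, 10),
    },
    metric="score",
    mode="min",
    num_samples=20,
    config_constraints=[(my_model_size, "<=", 40)],    # checked before evaluation
    metric_constraints=[("training_cost", "<=", 1)],   # checked on the returned metrics
)
```
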
### Parallel tuning
Related arguments: