@@ -8,7 +8,7 @@
 # A Fast Library for Automated Machine Learning & Tuning

 <p align="center">
-    <img src="https://github.com/microsoft/FLAML/blob/main/website/static/img/FLAML.png" width=200>
+    <img src="https://github.com/microsoft/FLAML/blob/main/website/static/img/flaml.svg" width=200>
    <br>
 </p>

@@ -470,7 +470,7 @@ class AutoML(BaseEstimator):
             'classification', 'regression', 'ts_forecast', 'rank',
             'seq-classification', 'seq-regression', 'summarization'.
         n_jobs: An integer of the number of threads for training.
-        log_file_name: A string of the log file name. To disable logging,
+        log_file_name: A string of the log file name | default="". To disable logging,
             set it to be an empty string "".
         estimator_list: A list of strings for estimator names, or 'auto'
             e.g., ```['lgbm', 'xgboost', 'xgb_limitdepth', 'catboost', 'rf', 'extra_tree']```
@@ -714,13 +714,11 @@ class AutoML(BaseEstimator):
         """Time taken to find best model in seconds."""
         return self.__dict__.get("_time_taken_best_iter")

-    def predict(
-        self, X_test: Union[np.array, pd.DataFrame, List[str], List[List[str]]]
-    ):
+    def predict(self, X: Union[np.array, pd.DataFrame, List[str], List[List[str]]]):
         """Predict label from features.

         Args:
-            X_test: A numpy array of featurized instances, shape n * m,
+            X: A numpy array of featurized instances, shape n * m,
                 or for 'ts_forecast' task:
                 a pandas dataframe with the first column containing
                 timestamp values (datetime type) or an integer n for
@@ -748,8 +746,8 @@ class AutoML(BaseEstimator):
                 "No estimator is trained. Please run fit with enough budget."
             )
             return None
-        X_test = self._preprocess(X_test)
-        y_pred = estimator.predict(X_test)
+        X = self._preprocess(X)
+        y_pred = estimator.predict(X)
         if (
             isinstance(y_pred, np.ndarray)
             and y_pred.ndim > 1
@@ -763,12 +761,12 @@ class AutoML(BaseEstimator):
         else:
             return y_pred

-    def predict_proba(self, X_test):
+    def predict_proba(self, X):
         """Predict the probability of each class from features, only works for
         classification problems.

         Args:
-            X_test: A numpy array of featurized instances, shape n * m.
+            X: A numpy array of featurized instances, shape n * m.

         Returns:
             A numpy array of shape n * c. c is the # classes. Each element at
@@ -780,8 +778,8 @@ class AutoML(BaseEstimator):
                 "No estimator is trained. Please run fit with enough budget."
             )
             return None
-        X_test = self._preprocess(X_test)
-        proba = self._trained_estimator.predict_proba(X_test)
+        X = self._preprocess(X)
+        proba = self._trained_estimator.predict_proba(X)
         return proba

     def _preprocess(self, X):
@@ -1804,7 +1802,7 @@ class AutoML(BaseEstimator):
             'classification', 'regression', 'ts_forecast', 'rank',
             'seq-classification', 'seq-regression', 'summarization'
         n_jobs: An integer of the number of threads for training.
-        log_file_name: A string of the log file name. To disable logging,
+        log_file_name: A string of the log file name | default="". To disable logging,
             set it to be an empty string "".
         estimator_list: A list of strings for estimator names, or 'auto'
             e.g., ```['lgbm', 'xgboost', 'xgb_limitdepth', 'catboost', 'rf', 'extra_tree']```
@@ -2001,7 +1999,7 @@ class AutoML(BaseEstimator):
         old_level = logger.getEffectiveLevel()
         self.verbose = verbose
         logger.setLevel(50 - verbose * 10)
-        if (not mlflow or not mlflow.active_run()) and not logger.handlers:
+        if not logger.handlers:
             # Add the console handler.
             _ch = logging.StreamHandler()
             _ch.setFormatter(logger_formatter)
@@ -2315,7 +2313,7 @@ class AutoML(BaseEstimator):
             ),
             key=lambda x: x.last_result["wall_clock_time"],
         )
-        for _track_iter, trial in enumerate(trials):
+        for self._track_iter, trial in enumerate(trials):
             result = trial.last_result
             better = False
             if result:
@@ -2326,20 +2324,20 @@ class AutoML(BaseEstimator):
                 wall_time = result.get("wall_clock_time")
                 if wall_time is not None:
                     self._state.time_from_start = wall_time
-                self._iter_per_learner[estimator] += 1
+                if search_state.sample_size == self._state.data_size[0]:
+                    self._iter_per_learner[estimator] += 1
                 if not self._fullsize_reached:
                     self._fullsize_reached = True
                 if search_state.best_loss < self._state.best_loss:
                     self._state.best_loss = search_state.best_loss
                     self._best_estimator = estimator
-                    self._config_history[_track_iter] = (
+                    self._config_history[self._track_iter] = (
                         self._best_estimator,
                         config,
                         self._time_taken_best_iter,
                     )
                     self._trained_estimator = search_state.trained_estimator
-                    self._best_iteration = _track_iter
+                    self._best_iteration = self._track_iter
                     self._time_taken_best_iter = self._state.time_from_start
                     better = True
                     self._search_states[estimator].best_config = config
@@ -2360,7 +2358,7 @@ class AutoML(BaseEstimator):
                 )
         if mlflow is not None and mlflow.active_run():
             with mlflow.start_run(nested=True):
-                mlflow.log_metric("iter_counter", self._iter_per_learner[estimator])
+                mlflow.log_metric("iter_counter", self._track_iter)
                 if "intermediate_results" in search_state.metric_for_logging:
                     for each_entry in search_state.metric_for_logging[
                         "intermediate_results"
@@ -2558,8 +2556,9 @@ class AutoML(BaseEstimator):
                 self._state.time_from_start = wall_time
             # logger.info(f"{self._search_states[estimator].sample_size}, {data_size}")
             if search_state.sample_size == self._state.data_size[0]:
-                self._iter_per_learner[estimator] += 1
+                self._iter_per_learner_fullsize[estimator] += 1
                 self._fullsize_reached = True
+            self._iter_per_learner[estimator] += 1
             if search_state.best_loss < self._state.best_loss:
                 best_config_sig = estimator + search_state.get_hist_config_sig(
                     self.data_size_full, search_state.best_config
@@ -2681,6 +2680,7 @@ class AutoML(BaseEstimator):
         self._config_history = {}
         self._max_iter_per_learner = 10000
         self._iter_per_learner = dict([(e, 0) for e in self.estimator_list])
+        self._iter_per_learner_fullsize = dict([(e, 0) for e in self.estimator_list])
         self._fullsize_reached = False
         self._trained_estimator = None
         self._best_estimator = None
@@ -2849,7 +2849,8 @@ class AutoML(BaseEstimator):
                 if (
                     self._search_states[estimator].time2eval_best
                     > self._state.time_budget - self._state.time_from_start
-                    or self._iter_per_learner[estimator] >= self._max_iter_per_learner
+                    or self._iter_per_learner_fullsize[estimator]
+                    >= self._max_iter_per_learner
                 ):
                     inv.append(0)
                     continue
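Taken together, the `automl.py` hunks above rename the public `X_test` argument of `predict`/`predict_proba` to `X` and track full-size iterations in a separate counter. A minimal sketch of the renamed API, assuming a toy sklearn dataset (the dataset and budget are illustrative, not from this diff):

```python
from flaml import AutoML
from sklearn.datasets import load_iris

X_train, y_train = load_iris(return_X_y=True)
automl = AutoML()
automl.fit(
    X_train,
    y_train,
    task="classification",
    time_budget=10,
    log_file_name="",  # default="": the empty string disables logging
)
y_pred = automl.predict(X_train)       # was automl.predict(X_test=...) before this change
proba = automl.predict_proba(X_train)  # classification tasks only
```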
flaml/model.py
@@ -16,6 +16,8 @@ from sklearn.dummy import DummyClassifier, DummyRegressor
 from scipy.sparse import issparse
 import logging
 import shutil
+from pandas import DataFrame, Series, to_datetime
+import sys
 from . import tune
 from .data import (
     group_counts,
@@ -31,10 +33,6 @@ from .data import (
     MULTICHOICECLASSIFICATION,
 )

-import pandas as pd
-from pandas import DataFrame, Series
-import sys
-
 try:
     import psutil
 except ImportError:
@@ -199,32 +197,32 @@ class BaseEstimator:
         train_time = self._fit(X_train, y_train, **kwargs)
         return train_time

-    def predict(self, X_test):
+    def predict(self, X):
         """Predict label from features.

         Args:
-            X_test: A numpy array or a dataframe of featurized instances, shape n*m.
+            X: A numpy array or a dataframe of featurized instances, shape n*m.

         Returns:
             A numpy array of shape n*1.
             Each element is the label for a instance.
         """
         if self._model is not None:
-            X_test = self._preprocess(X_test)
-            return self._model.predict(X_test)
+            X = self._preprocess(X)
+            return self._model.predict(X)
         else:
             logger.warning(
                 "Estimator is not fit yet. Please run fit() before predict()."
             )
-            return np.ones(X_test.shape[0])
+            return np.ones(X.shape[0])

-    def predict_proba(self, X_test):
+    def predict_proba(self, X):
         """Predict the probability of each class from features.

         Only works for classification problems

         Args:
-            X_test: A numpy array of featurized instances, shape n*m.
+            X: A numpy array of featurized instances, shape n*m.

         Returns:
             A numpy array of shape n*c. c is the # classes.
@@ -233,8 +231,8 @@ class BaseEstimator:
         """
         assert self._task in CLASSIFICATION, "predict_proba() only for classification."

-        X_test = self._preprocess(X_test)
-        return self._model.predict_proba(X_test)
+        X = self._preprocess(X)
+        return self._model.predict_proba(X)

     def cleanup(self):
         del self._model
@@ -380,7 +378,11 @@ class TransformersEstimator(BaseEstimator):

         if is_str or is_list_of_str:
             return tokenize_text(
-                X=X, Y=y, task=self._task, custom_hpo_args=self.custom_hpo_args
+                X=X,
+                Y=y,
+                task=self._task,
+                custom_hpo_args=self.custom_hpo_args,
+                tokenizer=self._tokenizer,
             )
         else:
             return X, None
@@ -400,9 +402,8 @@ class TransformersEstimator(BaseEstimator):

         transformers.logging.set_verbosity_error()

-        from transformers import EarlyStoppingCallback
+        from transformers import TrainerCallback
         from transformers.trainer_utils import set_seed
-        from transformers import AutoTokenizer

         from datasets import Dataset
         from .nlp.utils import (
@@ -422,10 +423,11 @@ class TransformersEstimator(BaseEstimator):
         # else:
         from .nlp.huggingface.trainer import TrainerForAuto
         from .nlp.huggingface.data_collator import DataCollatorForAuto
+        from .nlp.utils import get_auto_tokenizer

         this_params = self.params

-        class EarlyStoppingCallbackForAuto(EarlyStoppingCallback):
+        class EarlyStoppingCallbackForAuto(TrainerCallback):
             def on_train_begin(self, args, state, control, **callback_kwargs):
                 self.train_begin_time = time.time()

@@ -459,6 +461,10 @@ class TransformersEstimator(BaseEstimator):
         set_seed(self.params.get("seed", self._TrainingArguments.seed))

+        self._init_hpo_args(kwargs)
+        self._tokenizer = get_auto_tokenizer(
+            self.custom_hpo_args.model_path, self._task
+        )

         self._metric = kwargs["metric"]
         self.use_ray = kwargs.get("use_ray")
@@ -477,12 +483,6 @@ class TransformersEstimator(BaseEstimator):
             TransformersEstimator._join(self._X_train, self._y_train)
         )

-        # TODO: set a breakpoint here, observe the resulting train_dataset,
-        # compare it with the output of the tokenized results in your transformer example
-        # for example, if your task is MULTIPLECHOICE, you need to compare train_dataset with
-        # the output of https://github.com/huggingface/transformers/blob/master/examples/pytorch/multiple-choice/run_swag.py#L329
-        # make sure they are the same
-
         if X_val is not None:
             if (self._task not in NLG_TASKS) and (self._task != TOKENCLASSIFICATION):
                 self._X_val, _ = self._preprocess(X=X_val, **kwargs)
@@ -495,13 +495,7 @@ class TransformersEstimator(BaseEstimator):
         else:
             eval_dataset = None

-        tokenizer = AutoTokenizer.from_pretrained(
-            self.custom_hpo_args.model_path, use_fast=True
-        )
-        self._tokenizer = tokenizer
-
         num_labels = get_num_labels(self._task, self._y_train)

         training_args_config, per_model_config = separate_config(
             self.params, self._task
         )
@@ -538,6 +532,7 @@ class TransformersEstimator(BaseEstimator):
                 eval_steps=ckpt_freq,
                 evaluate_during_training=True,
                 save_steps=ckpt_freq,
+                logging_steps=ckpt_freq,
                 save_total_limit=0,
                 metric_for_best_model="loss",
                 fp16=self.custom_hpo_args.fp16,
@@ -553,6 +548,7 @@ class TransformersEstimator(BaseEstimator):
                 do_eval=True,
                 per_device_eval_batch_size=1,
                 eval_steps=ckpt_freq,
+                logging_steps=ckpt_freq,
                 evaluation_strategy=IntervalStrategy.STEPS,
                 save_steps=ckpt_freq,
                 save_total_limit=0,
@@ -566,9 +562,9 @@ class TransformersEstimator(BaseEstimator):
             model_init=partial(self._model_init, num_labels, per_model_config),
             train_dataset=train_dataset,
             eval_dataset=eval_dataset,
-            tokenizer=tokenizer,
+            tokenizer=self._tokenizer,
             data_collator=DataCollatorForAuto(
-                tokenizer=tokenizer,
+                tokenizer=self._tokenizer,
                 pad_to_multiple_of=8 if training_args.fp16 else None,
             )
             if self._task == MULTICHOICECLASSIFICATION
@@ -599,6 +595,13 @@ class TransformersEstimator(BaseEstimator):
                 num_labels=self._num_labels,
                 per_model_config=self._per_model_config,
             )
+        if hasattr(self._trainer, "intermediate_results"):
+            self.intermediate_results = [
+                x[1]
+                for x in sorted(
+                    self._trainer.intermediate_results.items(), key=lambda x: x[0]
+                )
+            ]
         self._trainer = None

     def _delete_one_ckpt(self, ckpt_location):
@@ -634,8 +637,8 @@ class TransformersEstimator(BaseEstimator):
             f"{PREFIX_CHECKPOINT_DIR}-{best_ckpt_global_step}",
         )
         self.params[self.ITER_HP] = best_ckpt_global_step
-        print(trainer.state.global_step)
-        print(trainer.ckpt_to_global_step)
+        logger.debug(trainer.state.global_step)
+        logger.debug(trainer.ckpt_to_global_step)
         return best_ckpt

     def _compute_metrics_by_dataset_name(self, eval_pred):
@@ -663,13 +666,13 @@ class TransformersEstimator(BaseEstimator):
                 if self._task == TOKENCLASSIFICATION
                 else np.argmax(predictions, axis=1)
             )
-            return {
-                "val_loss": metric_loss_score(
+            metric_dict = {
+                "automl_metric": metric_loss_score(
                     metric_name=self._metric, y_predict=predictions, y_true=labels
                 )
             }
         else:
-            agg_metric, metric_dict = self._metric(
+            loss, metric_dict = self._metric(
                 X_test=self._X_val,
                 y_test=self._y_val,
                 estimator=self,
@@ -677,14 +680,11 @@ class TransformersEstimator(BaseEstimator):
                 X_train=self._X_train,
                 y_train=self._y_train,
             )
-            if not hasattr(self, "intermediate_results"):
-                self.intermediate_results = []
-            self.intermediate_results.append(metric_dict)
-            return metric_dict
+            metric_dict["automl_metric"] = loss
+        return metric_dict

     def _init_model_for_predict(self, X_test):
         from datasets import Dataset
-        from transformers import AutoTokenizer
         from .nlp.huggingface.trainer import TrainerForAuto
         from .nlp.huggingface.data_collator import DataCollatorForPredict

@@ -695,14 +695,11 @@ class TransformersEstimator(BaseEstimator):
             output_dir=self.custom_hpo_args.output_dir,
             **self._training_args_config,
         )
-        tokenizer = AutoTokenizer.from_pretrained(
-            self.custom_hpo_args.model_path, use_fast=True
-        )
         self._trainer = TrainerForAuto(
             model=self._model,
             args=training_args,
             data_collator=DataCollatorForPredict(
-                tokenizer=tokenizer,
+                tokenizer=self._tokenizer,
                 pad_to_multiple_of=8 if training_args.fp16 else None,
             )
             if self._task == MULTICHOICECLASSIFICATION
@@ -711,18 +708,18 @@ class TransformersEstimator(BaseEstimator):
         )
         return test_dataset, training_args

-    def predict_proba(self, X_test):
+    def predict_proba(self, X):
         assert (
             self._task in CLASSIFICATION
         ), "predict_proba() only for classification tasks."

-        test_dataset, _ = self._init_model_for_predict(X_test)
+        test_dataset, _ = self._init_model_for_predict(X)
         predictions = self._trainer.predict(test_dataset)
         self._trainer = None
         return predictions.predictions

-    def predict(self, X_test):
-        test_dataset, training_args = self._init_model_for_predict(X_test)
+    def predict(self, X):
+        test_dataset, training_args = self._init_model_for_predict(X)
         if self._task not in NLG_TASKS:
             predictions = self._trainer.predict(test_dataset)
         else:
@@ -738,9 +735,6 @@ class TransformersEstimator(BaseEstimator):
             return predictions.predictions.reshape((len(predictions.predictions),))
         elif self._task == TOKENCLASSIFICATION:
             return np.argmax(predictions.predictions, axis=2)
-        # TODO: elif self._task == your task, return the corresponding prediction
-        # e.g., if your task == QUESTIONANSWERING, you need to return the answer instead
-        # of the index
         elif self._task == SUMMARIZATION:
             if isinstance(predictions.predictions, tuple):
                 predictions = np.argmax(predictions.predictions[0], axis=2)
@@ -1114,12 +1108,12 @@ class XGBoostEstimator(SKLearnEstimator):
         train_time = time.time() - start_time
         return train_time

-    def predict(self, X_test):
+    def predict(self, X):
         import xgboost as xgb

-        if not issparse(X_test):
-            X_test = self._preprocess(X_test)
-        dtest = xgb.DMatrix(X_test)
+        if not issparse(X):
+            X = self._preprocess(X)
+        dtest = xgb.DMatrix(X)
         return super().predict(dtest)

     @classmethod
@@ -1604,22 +1598,22 @@ class Prophet(SKLearnEstimator):
         self._model = model
         return train_time

-    def predict(self, X_test):
-        if isinstance(X_test, int):
+    def predict(self, X):
+        if isinstance(X, int):
             raise ValueError(
                 "predict() with steps is only supported for arima/sarimax."
                 " For Prophet, pass a dataframe with the first column containing"
                 " the timestamp values."
             )
         if self._model is not None:
-            X_test = self._preprocess(X_test)
-            forecast = self._model.predict(X_test)
+            X = self._preprocess(X)
+            forecast = self._model.predict(X)
             return forecast["yhat"]
         else:
             logger.warning(
                 "Estimator is not fit yet. Please run fit() before predict()."
             )
-            return np.ones(X_test.shape[0])
+            return np.ones(X.shape[0])


 class ARIMA(Prophet):
@@ -1648,7 +1642,7 @@ class ARIMA(Prophet):

     def _join(self, X_train, y_train):
         train_df = super()._join(X_train, y_train)
-        train_df.index = pd.to_datetime(train_df[TS_TIMESTAMP_COL])
+        train_df.index = to_datetime(train_df[TS_TIMESTAMP_COL])
         train_df = train_df.drop(TS_TIMESTAMP_COL, axis=1)
         return train_df

@@ -1684,30 +1678,30 @@ class ARIMA(Prophet):
         self._model = model
         return train_time

-    def predict(self, X_test):
+    def predict(self, X):
         if self._model is not None:
-            if isinstance(X_test, int):
-                forecast = self._model.forecast(steps=X_test)
-            elif isinstance(X_test, DataFrame):
-                start = X_test[TS_TIMESTAMP_COL].iloc[0]
-                end = X_test[TS_TIMESTAMP_COL].iloc[-1]
-                if len(X_test.columns) > 1:
-                    X_test = self._preprocess(X_test.drop(columns=TS_TIMESTAMP_COL))
-                    regressors = list(X_test)
-                    print(start, end, X_test.shape)
+            if isinstance(X, int):
+                forecast = self._model.forecast(steps=X)
+            elif isinstance(X, DataFrame):
+                start = X[TS_TIMESTAMP_COL].iloc[0]
+                end = X[TS_TIMESTAMP_COL].iloc[-1]
+                if len(X.columns) > 1:
+                    X = self._preprocess(X.drop(columns=TS_TIMESTAMP_COL))
+                    regressors = list(X)
+                    print(start, end, X.shape)
                     forecast = self._model.predict(
-                        start=start, end=end, exog=X_test[regressors]
+                        start=start, end=end, exog=X[regressors]
                     )
                 else:
                     forecast = self._model.predict(start=start, end=end)
             else:
                 raise ValueError(
-                    "X_test needs to be either a pandas Dataframe with dates as the first column"
+                    "X needs to be either a pandas Dataframe with dates as the first column"
                     " or an int number of periods for predict()."
                 )
             return forecast
         else:
-            return np.ones(X_test if isinstance(X_test, int) else X_test.shape[0])
+            return np.ones(X if isinstance(X, int) else X.shape[0])


 class SARIMAX(ARIMA):
@@ -1829,7 +1823,7 @@ class TS_SKLearn_Regressor(SKLearnEstimator):
         cols = list(X)
         if len(cols) == 1:
             ds_col = cols[0]
-            X = pd.DataFrame(index=X[ds_col])
+            X = DataFrame(index=X[ds_col])
         elif len(cols) > 1:
             ds_col = cols[0]
             exog_cols = cols[1:]
@@ -1879,42 +1873,40 @@ class TS_SKLearn_Regressor(SKLearnEstimator):
             train_time = time.time() - current_time
         return train_time

-    def predict(self, X_test):
+    def predict(self, X):
         if self._model is not None:
-            X_test = self.transform_X(X_test)
-            X_test = self._preprocess(X_test)
+            X = self.transform_X(X)
+            X = self._preprocess(X)
             if isinstance(self._model, list):
                 assert len(self._model) == len(
-                    X_test
-                ), "Model is optimized for horizon, length of X_test must be equal to `period`."
+                    X
+                ), "Model is optimized for horizon, length of X must be equal to `period`."
                 preds = []
                 for i in range(1, len(self._model) + 1):
                     (
                         X_pred,
                         _,
                     ) = self.hcrystaball_model._transform_data_to_tsmodel_input_format(
-                        X_test.iloc[:i, :]
+                        X.iloc[:i, :]
                     )
                     preds.append(self._model[i - 1].predict(X_pred)[-1])
-                forecast = pd.DataFrame(
+                forecast = DataFrame(
                     data=np.asarray(preds).reshape(-1, 1),
                     columns=[self.hcrystaball_model.name],
-                    index=X_test.index,
+                    index=X.index,
                 )
             else:
                 (
                     X_pred,
                     _,
-                ) = self.hcrystaball_model._transform_data_to_tsmodel_input_format(
-                    X_test
-                )
+                ) = self.hcrystaball_model._transform_data_to_tsmodel_input_format(X)
                 forecast = self._model.predict(X_pred)
             return forecast
         else:
             logger.warning(
                 "Estimator is not fit yet. Please run fit() before predict()."
             )
-            return np.ones(X_test.shape[0])
+            return np.ones(X.shape[0])


 class LGBM_TS_Regressor(TS_SKLearn_Regressor):
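The rename runs through every estimator in `model.py`, and for the time-series learners the single `X` argument stays overloaded. A hedged sketch of the two call styles the ARIMA branch above accepts (`trained_arima` and the column name `ds` are illustrative, not from this diff):

```python
import pandas as pd

# After fitting an arima/sarimax estimator (hypothetical `trained_arima`),
# predict() accepts either:
# 1) an integer number of periods to forecast,
forecast = trained_arima.predict(12)

# 2) or a dataframe whose first column holds the timestamps to predict for.
future = pd.DataFrame({"ds": pd.date_range("2022-01-01", periods=12, freq="M")})
forecast = trained_arima.predict(future)

# Prophet, by contrast, raises ValueError for the integer form (see the hunk above).
```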
@@ -1,65 +1,17 @@
-# Hyperparameter Optimization for Huggingface Transformers
+# AutoML for NLP

-Fine-tuning pre-trained language models based on the transformers library.
+This directory contains utility functions used by AutoNLP. Currently we support four NLP tasks: sequence classification, sequence regression, multiple choice and summarization.

-An example:
+Please refer to this [link](https://microsoft.github.io/FLAML/docs/Examples/AutoML-NLP) for examples.

-```python
-from flaml import AutoML
-import pandas as pd
-
-train_dataset = pd.read_csv("data/input/train.tsv", delimiter="\t", quoting=3)
-dev_dataset = pd.read_csv("data/input/dev.tsv", delimiter="\t", quoting=3)
-test_dataset = pd.read_csv("data/input/test.tsv", delimiter="\t", quoting=3)
-
-custom_sent_keys = ["#1 String", "#2 String"]
-label_key = "Quality"
-
-X_train = train_dataset[custom_sent_keys]
-y_train = train_dataset[label_key]
-
-X_val = dev_dataset[custom_sent_keys]
-y_val = dev_dataset[label_key]
-
-X_test = test_dataset[custom_sent_keys]
-
-automl = AutoML()
-
-automl_settings = {
-    "gpu_per_trial": 0,  # use a value larger than 0 for GPU training
-    "max_iter": 10,
-    "time_budget": 300,
-    "task": "seq-classification",
-    "metric": "accuracy",
-}
-
-automl_settings["custom_hpo_args"] = {
-    "model_path": "google/electra-small-discriminator",
-    "output_dir": "data/output/",
-    "ckpt_per_epoch": 1,
-}
-
-automl.fit(
-    X_train=X_train, y_train=y_train, X_val=X_val, y_val=y_val, **automl_settings
-)
-automl.predict(X_test)
-```
-
-The current use cases that are supported:
-
-1. A simplified version of fine-tuning the GLUE dataset using HuggingFace;
-2. For selecting better search space for fine-tuning the GLUE dataset;
-3. Use the search algorithms in flaml for more efficient fine-tuning of HuggingFace.
-
-The use cases that can be supported in future:
-
-1. HPO fine-tuning for text generation;
-2. HPO fine-tuning for question answering.
-
-## Troubleshooting fine-tuning HPO for pre-trained language models
+# Troubleshooting fine-tuning HPO for pre-trained language models

-To reproduce the results for our ACL2021 paper:
+The frequent updates of transformers may lead to fluctuations in the results of tuning. To help users quickly troubleshoot the result of AutoNLP when a tuning failure occurs (e.g., failing to reproduce previous results), we have provided the following jupyter notebook:
+
+* [Troubleshooting HPO for fine-tuning pre-trained language models](https://github.com/microsoft/FLAML/blob/main/notebook/research/acl2021.ipynb)
+
+Our findings on troubleshooting fine-tuning the Electra and RoBERTa model for the GLUE dataset can be seen in the following paper published in ACL 2021:

 * [An Empirical Study on Hyperparameter Optimization for Fine-Tuning Pre-trained Language Models](https://arxiv.org/abs/2106.09204). Xueqing Liu, Chi Wang. ACL-IJCNLP 2021.

@@ -72,4 +24,4 @@ To reproduce the results for our ACL2021 paper:
 }
 ```

 Please refer to the following jupyter notebook: [Troubleshooting HPO for fine-tuning pre-trained language models](https://github.com/microsoft/FLAML/blob/main/notebook/research/acl2021.ipynb)
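For reference, the end-to-end usage that the old README spelled out (and that the new README now links to) still has this shape; a condensed sketch based on the removed example:

```python
from flaml import AutoML
import pandas as pd

train = pd.read_csv("data/input/train.tsv", delimiter="\t", quoting=3)
dev = pd.read_csv("data/input/dev.tsv", delimiter="\t", quoting=3)

automl = AutoML()
automl.fit(
    X_train=train[["#1 String", "#2 String"]],
    y_train=train["Quality"],
    X_val=dev[["#1 String", "#2 String"]],
    y_val=dev["Quality"],
    task="seq-classification",
    metric="accuracy",
    max_iter=10,
    time_budget=300,
    gpu_per_trial=0,  # use a value larger than 0 for GPU training
    custom_hpo_args={
        "model_path": "google/electra-small-discriminator",
        "output_dir": "data/output/",
        "ckpt_per_epoch": 1,
    },
)
```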
@@ -44,6 +44,19 @@ class TrainerForAuto(Seq2SeqTrainer):
             model, inputs, prediction_loss_only, ignore_keys
         )

+    def log(self, logs) -> None:
+        if getattr(self, "_is_seq2seq", None):
+            super().log(logs)
+        else:
+            super(Seq2SeqTrainer, self).log(logs)
+        if not hasattr(self, "intermediate_results"):
+            self.intermediate_results = {}
+
+        epoch_num = logs.get("epoch", None)
+        if epoch_num:
+            self.intermediate_results.setdefault(epoch_num, {})
+            self.intermediate_results[epoch_num].update(logs)
+
     def evaluate(
         self,
         eval_dataset=None,
@@ -74,10 +87,6 @@ class TrainerForAuto(Seq2SeqTrainer):
             ignore_keys,
             metric_key_prefix,
         )
-        # if metrics:
-        #     for key in list(metrics.keys()):
-        #         if key.startswith("eval_"):
-        #             metrics[key[5:]] = metrics.pop(key)
         if hasattr(self, "ckpt_to_global_step"):
             self.ckpt_to_global_step[ckpt_dir] = self.state.global_step
         if metrics:
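The new `log` override keys HuggingFace's per-epoch log dicts by epoch number, and `TransformersEstimator._train` reads them back in epoch order. A sketch of the structure this builds (the metric values are illustrative):

```python
# trainer.intermediate_results maps epoch number -> merged log dict, e.g.:
intermediate_results = {
    1.0: {"loss": 0.63, "learning_rate": 4.5e-5, "epoch": 1.0},
    2.0: {"loss": 0.41, "learning_rate": 4.0e-5, "epoch": 2.0},
}

# The estimator then flattens it into an epoch-ordered list:
ordered = [v for _, v in sorted(intermediate_results.items(), key=lambda kv: kv[0])]
```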
@@ -30,34 +30,61 @@ def load_default_huggingface_metric_for_task(task):
 global tokenized_column_names


-def tokenize_text(X, Y=None, task=None, custom_hpo_args=None):
+def get_auto_tokenizer(model_path, task):
+    from transformers import AutoTokenizer
+
+    if task == SUMMARIZATION:
+        return AutoTokenizer.from_pretrained(
+            model_path,  # 'roberta-base'
+            cache_dir=None,
+            use_fast=True,
+            revision="main",
+            use_auth_token=None,
+        )
+    else:
+        return AutoTokenizer.from_pretrained(model_path, use_fast=True)
+
+
+def tokenize_text(X, Y=None, task=None, custom_hpo_args=None, tokenizer=None):
     if task in (SEQCLASSIFICATION, SEQREGRESSION):
-        X_tokenized, _ = tokenize_onedataframe(
-            X, this_tokenizer=None, task=task, custom_hpo_args=custom_hpo_args
+        X_tokenized = tokenize_onedataframe(
+            X,
+            tokenizer=tokenizer,
+            task=task,
+            custom_hpo_args=custom_hpo_args,
+            prefix_str="",
         )
         return X_tokenized, None
     elif task == TOKENCLASSIFICATION:
-        return tokenize_text_tokclassification(X, Y, custom_hpo_args)
+        return tokenize_text_tokclassification(
+            X, Y, tokenizer=tokenizer, custom_hpo_args=custom_hpo_args
+        )
     elif task in NLG_TASKS:
-        return tokenize_seq2seq(X, Y, task=task, custom_hpo_args=custom_hpo_args)
+        return tokenize_seq2seq(
+            X, Y, tokenizer=tokenizer, task=task, custom_hpo_args=custom_hpo_args
+        )
     elif task == MULTICHOICECLASSIFICATION:
-        return tokenize_text_multiplechoice(X, custom_hpo_args)
+        return tokenize_text_multiplechoice(
+            X, tokenizer=tokenizer, custom_hpo_args=custom_hpo_args
+        )


-def tokenize_seq2seq(X, Y, task=None, custom_hpo_args=None):
-    model_inputs, tokenizer = tokenize_onedataframe(
+def tokenize_seq2seq(X, Y, tokenizer, task=None, custom_hpo_args=None):
+    model_inputs = tokenize_onedataframe(
         X,
-        this_tokenizer=None,
+        tokenizer=tokenizer,
         task=task,
         custom_hpo_args=custom_hpo_args,
+        prefix_str="summarize: ",
     )
     labels = None
     if Y is not None:
-        labels, _ = tokenize_onedataframe(
+        labels = tokenize_onedataframe(
             Y.to_frame(),
-            this_tokenizer=tokenizer,
+            tokenizer=tokenizer,
             task=task,
             custom_hpo_args=custom_hpo_args,
+            prefix_str="",
         )
         labels["label"] = [
             [(each_l if each_l != tokenizer.pad_token_id else -100) for each_l in label]
@@ -70,7 +97,7 @@ def tokenize_seq2seq(X, Y, task=None, custom_hpo_args=None):


 def tokenize_and_align_labels(
-    examples, tokenizer, custom_hpo_args, X_sent_key, Y_sent_key=None
+    examples, tokenizer, custom_hpo_args=None, X_sent_key=None, Y_sent_key=None
 ):
     global tokenized_column_names

@@ -115,14 +142,10 @@ def tokenize_and_align_labels(
     return tokenized_input_and_labels


-def tokenize_text_tokclassification(X, Y, custom_hpo_args):
-    from transformers import AutoTokenizer
+def tokenize_text_tokclassification(X, Y, tokenizer, custom_hpo_args=None):
     import pandas as pd

     global tokenized_column_names
-    this_tokenizer = AutoTokenizer.from_pretrained(
-        custom_hpo_args.model_path, use_fast=True
-    )
     if Y is not None:
         X_and_Y = pd.concat([X, Y.to_frame()], axis=1)
         X_key = list(X.keys())[0]
@@ -130,7 +153,7 @@ def tokenize_text_tokclassification(X, Y, custom_hpo_args):
         X_and_Y_tokenized = X_and_Y.apply(
             lambda x: tokenize_and_align_labels(
                 x,
-                tokenizer=this_tokenizer,
+                tokenizer=tokenizer,
                 custom_hpo_args=custom_hpo_args,
                 X_sent_key=X_key,
                 Y_sent_key=Y_key,
@@ -150,7 +173,7 @@ def tokenize_text_tokclassification(X, Y, custom_hpo_args):
         d = X.apply(
             lambda x: tokenize_and_align_labels(
                 x,
-                tokenizer=this_tokenizer,
+                tokenizer=tokenizer,
                 custom_hpo_args=custom_hpo_args,
                 X_sent_key=X_key,
                 Y_sent_key=None,
@@ -167,37 +190,21 @@ def tokenize_text_tokclassification(X, Y, custom_hpo_args):

 def tokenize_onedataframe(
     X,
-    this_tokenizer=None,
+    tokenizer,
     task=None,
     custom_hpo_args=None,
+    prefix_str=None,
 ):
-    from transformers import AutoTokenizer
     import pandas

     global tokenized_column_names

-    if this_tokenizer:
-        with this_tokenizer.as_target_tokenizer():
-            d = X.apply(
-                lambda x: tokenize_row(
-                    x,
-                    this_tokenizer,
-                    prefix=("",) if task is SUMMARIZATION else None,
-                    task=task,
-                    custom_hpo_args=custom_hpo_args,
-                ),
-                axis=1,
-                result_type="expand",
-            )
-    else:
-        this_tokenizer = AutoTokenizer.from_pretrained(
-            custom_hpo_args.model_path, use_fast=True
-        )
-        with this_tokenizer.as_target_tokenizer():
-            d = X.apply(
-                lambda x: tokenize_row(
-                    x,
-                    this_tokenizer,
-                    prefix=("summarize: ",) if task is SUMMARIZATION else None,
+    with tokenizer.as_target_tokenizer():
+        d = X.apply(
+            lambda x: tokenize_row(
+                x,
+                tokenizer,
+                prefix=(prefix_str,) if task is SUMMARIZATION else None,
                 task=task,
                 custom_hpo_args=custom_hpo_args,
             ),
@@ -206,7 +213,7 @@ def tokenize_onedataframe(
         )
     X_tokenized = pandas.DataFrame(columns=tokenized_column_names)
     X_tokenized[tokenized_column_names] = d
-    return X_tokenized, this_tokenizer
+    return X_tokenized


 def postprocess_text(preds, labels):
@@ -223,9 +230,7 @@ def postprocess_text(preds, labels):
     return preds, labels


-def tokenize_row(
-    this_row, this_tokenizer, prefix=None, task=None, custom_hpo_args=None
-):
+def tokenize_row(this_row, tokenizer, prefix=None, task=None, custom_hpo_args=None):
     global tokenized_column_names
     assert (
         "max_seq_length" in custom_hpo_args.__dict__
@@ -234,7 +239,7 @@ def tokenize_row(
     if prefix:
         this_row = tuple(["".join(x) for x in zip(prefix, this_row)])

-    tokenized_example = this_tokenizer(
+    tokenized_example = tokenizer(
         *tuple(this_row),
         padding="max_length",
         max_length=custom_hpo_args.max_seq_length,
@@ -246,22 +251,14 @@ def tokenize_row(
     return [tokenized_example[x] for x in tokenized_column_names]


-def tokenize_text_multiplechoice(X, custom_hpo_args):
-    from transformers import AutoTokenizer
+def tokenize_text_multiplechoice(X, tokenizer, custom_hpo_args=None):
     import pandas

     global tokenized_column_names

-    this_tokenizer = AutoTokenizer.from_pretrained(
-        custom_hpo_args.model_path,  # 'roberta-base'
-        cache_dir=None,
-        use_fast=True,
-        revision="main",
-        use_auth_token=None,
-    )
     t = X[["sent1", "sent2", "ending0", "ending1", "ending2", "ending3"]]
     d = t.apply(
-        lambda x: tokenize_swag(x, this_tokenizer, custom_hpo_args),
+        lambda x: tokenize_swag(x, tokenizer, custom_hpo_args),
         axis=1,
         result_type="expand",
     )
@@ -272,7 +269,7 @@ def tokenize_text_multiplechoice(X, custom_hpo_args):
     return output, None


-def tokenize_swag(this_row, this_tokenizer, custom_hpo_args):
+def tokenize_swag(this_row, tokenizer, custom_hpo_args=None):
     global tokenized_column_names

     first_sentences = [[this_row["sent1"]] * 4]
@@ -289,7 +286,7 @@ def tokenize_swag(this_row, this_tokenizer, custom_hpo_args):
     # From 2 dimension to 1 dimension array
     first_sentences = list(chain(*first_sentences))

-    tokenized_example = this_tokenizer(
+    tokenized_example = tokenizer(
         *tuple([first_sentences, second_sentences]),
         truncation=True,
         max_length=custom_hpo_args.max_seq_length,
@@ -411,10 +408,7 @@ def load_model(checkpoint_path, task, num_labels, per_model_config=None):
     )
     from ..data import SEQCLASSIFICATION, SEQREGRESSION, TOKENCLASSIFICATION

-    this_model_type = AutoConfig.from_pretrained(checkpoint_path).model_type
-    this_vocab_size = AutoConfig.from_pretrained(checkpoint_path).vocab_size
-
-    def get_this_model(task):
+    def get_this_model(task, model_config):
         from transformers import AutoModelForSequenceClassification
         from transformers import AutoModelForSeq2SeqLM
         from transformers import AutoModelForMultipleChoice
@@ -463,28 +457,34 @@ def load_model(checkpoint_path, task, num_labels, per_model_config=None):
             model_config = AutoConfig.from_pretrained(checkpoint_path)
         return model_config

+    current_config = AutoConfig.from_pretrained(checkpoint_path)
+    this_model_type, this_vocab_size = (
+        current_config.model_type,
+        current_config.vocab_size,
+    )
+
     if task == SEQCLASSIFICATION:
-        num_labels_old = AutoConfig.from_pretrained(checkpoint_path).num_labels
+        num_labels_old = current_config.num_labels
         if is_pretrained_model_in_classification_head_list(this_model_type):
             model_config_num_labels = num_labels_old
         else:
             model_config_num_labels = num_labels
-        model_config = _set_model_config(checkpoint_path)
+        new_config = _set_model_config(checkpoint_path)

         if is_pretrained_model_in_classification_head_list(this_model_type):
             if num_labels != num_labels_old:
-                this_model = get_this_model(task)
-                model_config.num_labels = num_labels
+                this_model = get_this_model(task, new_config)
+                new_config.num_labels = num_labels
                 this_model.num_labels = num_labels
                 this_model.classifier = (
                     AutoSeqClassificationHead.from_model_type_and_config(
-                        this_model_type, model_config
+                        this_model_type, new_config
                     )
                 )
             else:
-                this_model = get_this_model(task)
+                this_model = get_this_model(task, new_config)
         else:
-            this_model = get_this_model(task)
+            this_model = get_this_model(task, new_config)
         this_model.resize_token_embeddings(this_vocab_size)
         return this_model
     else:
@@ -493,7 +493,7 @@ def load_model(checkpoint_path, task, num_labels, per_model_config=None):
     elif task == TOKENCLASSIFICATION:
         model_config_num_labels = num_labels
         model_config = _set_model_config(checkpoint_path)
-        this_model = get_this_model(task)
+        this_model = get_this_model(task, model_config)
         return this_model

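Net effect of the `utils.py` refactor: the tokenizer is built once via `get_auto_tokenizer` and threaded through every `tokenize_*` helper instead of being re-instantiated from `AutoTokenizer.from_pretrained` on each call. A hedged sketch of the new calling convention (the data and the `custom_hpo_args` namespace are illustrative stand-ins):

```python
import pandas as pd
from types import SimpleNamespace
from flaml.nlp.utils import get_auto_tokenizer, tokenize_text

# One tokenizer instance, created up front from the model path...
tokenizer = get_auto_tokenizer("google/electra-small-discriminator", "seq-classification")

# ...and passed explicitly to tokenize_text, which no longer returns one.
custom_hpo_args = SimpleNamespace(max_seq_length=128)  # assumed fields for this sketch
X = pd.DataFrame({"sentence": ["a short example", "another example"]})
X_tokenized, _ = tokenize_text(
    X, task="seq-classification", custom_hpo_args=custom_hpo_args, tokenizer=tokenizer
)
```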
@@ -100,7 +100,7 @@ class BlendSearch(Searcher):
             needed for a config.
             It is used to skip configs which do not fit in memory.
         metric_constraints: A list of metric constraints to be satisfied.
-            E.g., `['precision', '>=', 0.9]`.
+            E.g., `['precision', '>=', 0.9]`. The sign can be ">=" or "<=".
         seed: An integer of the random seed.
         experimental: A bool of whether to use experimental features.
     """
@@ -155,7 +155,7 @@ class BlendSearch(Searcher):
             from functools import partial

             gs_space = partial(define_by_run_func, space=space)
-            evaluated_rewards = None  # not supproted by define-by-run
+            evaluated_rewards = None  # not supported by define-by-run
         else:
             gs_space = space
         gs_seed = seed - 10 if (seed - 10) >= 0 else seed - 11 + (1 << 32)
@@ -748,6 +748,10 @@ class BlendSearch(Searcher):
                 and value > threshold
                 or sign == ">="
                 and value < threshold
+                or sign == ">"
+                and value <= threshold
+                or sign == "<"
+                and value > threshold
             ):
                 self._result[config_signature] = {
                     self._metric: np.inf * self._ls.metric_op,
@@ -123,7 +123,7 @@ class Searcher:
                 mod in ["min", "max", "obs"] for mod in mode
             ), "All of mode must be 'min' or 'max' or 'obs'!"
         else:
-            raise ValueError("Mode most either be a list or string")
+            raise ValueError("Mode must either be a list or string")

     def set_search_properties(
         self, metric: Optional[str], mode: Optional[str], config: Dict
@@ -22,6 +22,11 @@ import numpy
 import random
 from ..tune.sample import Categorical, Domain, RandomState

+try:
+    from ray.tune.sample import Domain as RayDomain
+except ImportError:
+    RayDomain = Domain
+
 logger = logging.getLogger(__name__)


@@ -192,10 +197,10 @@ def _resolve_domain_vars(
                 )
             except RecursiveDependencyError as e:
                 error = e
-            except Exception:
-                raise ValueError(
-                    "Failed to evaluate expression: {}: {}".format(path, domain)
-                )
+            # except Exception:
+            #     raise ValueError(
+            #         "Failed to evaluate expression: {}: {}".format(path, domain)
+            #     )
             else:
                 assign_value(spec, path, value)
                 resolved[path] = value
@@ -243,7 +248,7 @@ def _is_resolved(v) -> bool:


 def _try_resolve(v) -> Tuple[bool, Any]:
-    if isinstance(v, Domain):
+    if isinstance(v, (Domain, RayDomain)):
         # Domain to sample from
         return False, v
     elif isinstance(v, dict) and len(v) == 1 and "grid_search" in v:
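The `try`/`except ImportError` guard is the usual optional-dependency idiom: alias Ray's `Domain` when Ray is installed, and fall back to FLAML's own class otherwise, so the later `isinstance(v, (Domain, RayDomain))` check stays valid in both environments. The same pattern in isolation:

```python
# Optional-dependency fallback: prefer the external class, alias the local one
# if the import fails, so isinstance checks work with or without Ray installed.
try:
    from ray.tune.sample import Domain as RayDomain
except ImportError:
    from flaml.tune.sample import Domain as RayDomain


def is_domain(v) -> bool:
    return isinstance(v, RayDomain)
```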
@@ -61,7 +61,15 @@ def define_by_run_func(trial, space: Dict, path: str = "") -> Optional[Dict[str,
             elif isinstance(sampler, sample.Uniform):
                 if quantize:
-                    trial.suggest_float(key, domain.lower, domain.upper, step=quantize)
+                    trial.suggest_float(key, domain.lower, domain.upper)
                 else:
                     trial.suggest_float(key, domain.lower, domain.upper)
+            else:
+                raise ValueError(
+                    "Optuna search does not support parameters of type "
+                    "`{}` with samplers of type `{}`".format(
+                        type(domain).__name__, type(domain.sampler).__name__
+                    )
+                )
         elif isinstance(domain, sample.Integer):
             if isinstance(sampler, sample.LogUniform):
                 trial.suggest_int(
@@ -144,6 +152,8 @@ def unflatten_hierarchical(config: Dict, space: Dict) -> Tuple[Dict, Dict]:
             key = key[:-8]
         domain = space.get(key)
         if domain is not None:
+            if isinstance(domain, dict):
+                value, domain = unflatten_hierarchical(value, domain)
             subspace[key] = domain
             if isinstance(domain, sample.Domain):
                 sampler = domain.sampler
@@ -404,8 +414,8 @@ def denormalize(
             elif str(sampler) == "Normal":
                 # denormalization for 'Normal'
                 config_denorm[key] = value * sampler.sd + sampler.mean
-            else:
-                config_denorm[key] = value
+            # else:
+            #     config_denorm[key] = value
             # Handle quantized
             if quantize is not None:
                 config_denorm[key] = (
@@ -419,6 +429,14 @@ def denormalize(
     return config_denorm


+def equal(config, const) -> bool:
+    if config == const:
+        return True
+    if not isinstance(config, Dict) or not isinstance(const, Dict):
+        return False
+    return all(equal(config[key], value) for key, value in const.items())
+
+
 def indexof(domain: Dict, config: Dict) -> int:
     """Find the index of config in domain.categories."""
     index = config.get("_choice_")
@@ -435,8 +453,7 @@ def indexof(domain: Dict, config: Dict) -> int:
         # print(cat.keys())
         if not set(config.keys()).issubset(set(cat.keys())):
             continue
-        # print(domain.const[i])
-        if all(config[key] == value for key, value in domain.const[i].items()):
+        if equal(config, domain.const[i]):
             # assumption: the concatenation of constants is a unique identifier
             return i
     return None
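The new `equal` helper replaces the flat key-by-key comparison so that nested constant dicts in hierarchical search spaces match recursively. A quick illustration of the difference, with made-up values, using a standalone copy of the added helper:

```python
def equal(config, const) -> bool:
    # mirrors the helper added above; extra keys in `config` are ignored
    if config == const:
        return True
    if not isinstance(config, dict) or not isinstance(const, dict):
        return False
    return all(equal(config[key], value) for key, value in const.items())


const = {"model": {"name": "lgbm"}}               # constant part of a category
config = {"model": {"name": "lgbm", "depth": 6}}  # sampled config with an extra nested key

flat = all(config[k] == v for k, v in const.items())  # False: the nested dicts differ
deep = equal(config, const)                           # True: matches recursively
assert not flat and deep
```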
@@ -221,7 +221,7 @@ def run(
             used, otherwise no scheduler will be used. When set 'flaml', an
             authentic scheduler implemented in FLAML will be used. It does not
             require users to report intermediate results in evaluation_function.
-            Find more details abuot this scheduler in this paper
+            Find more details about this scheduler in this paper
             https://arxiv.org/pdf/1911.04706.pdf).
             When set 'asha', the input for arguments "resource_attr",
             "min_resource", "max_resource" and "reduction_factor" will be passed
@@ -262,7 +262,7 @@ def run(
             needed for a config.
             It is used to skip configs which do not fit in memory.
         metric_constraints: A list of metric constraints to be satisfied.
-            e.g., `['precision', '>=', 0.9]`.
+            e.g., `['precision', '>=', 0.9]`. The sign can be ">=" or "<=".
         max_failure: int | the maximal consecutive number of failures to sample
             a trial before the tuning is terminated.
         use_ray: A boolean of whether to use ray as the backend.
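A hedged sketch of passing such a metric constraint to `flaml.tune.run` (the evaluation function and numbers are illustrative):

```python
from flaml import tune


def evaluate(config):
    # toy objective: report both the optimized metric and the constrained one
    return {"val_loss": (config["x"] - 2) ** 2, "precision": 0.85 + config["x"] / 100}


analysis = tune.run(
    evaluate,
    config={"x": tune.uniform(0, 10)},
    metric="val_loss",
    mode="min",
    metric_constraints=[["precision", ">=", 0.9]],  # sign must be ">=" or "<="
    num_samples=20,
)
```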
@@ -1 +1 @@
-__version__ = "0.9.4"
+__version__ = "0.9.5"

@@ -18,7 +18,7 @@
     "## 1. Introduction\n",
     "\n",
     "FLAML is a Python library (https://github.com/microsoft/FLAML) designed to automatically produce accurate machine learning models \n",
-    "with low computational cost. It is fast and cheap. The simple and lightweight design makes it easy to use and extend, such as adding new learners. FLAML can \n",
+    "with low computational cost. It is fast and economical. The simple and lightweight design makes it easy to use and extend, such as adding new learners. FLAML can \n",
     "- serve as an economical AutoML engine,\n",
     "- be used as a fast hyperparameter tuning tool, or \n",
     "- be embedded in self-tuning software that requires low latency & resource in repetitive\n",

@@ -13,7 +13,7 @@
    "source": [
     "## 1. Introduction\n",
     "\n",
-    "FLAML is a Python library (https://github.com/microsoft/FLAML) designed to automatically produce accurate machine learning models with low computational cost. It is fast and cheap. The simple and lightweight design makes it easy to use and extend, such as adding new learners. FLAML can\n",
+    "FLAML is a Python library (https://github.com/microsoft/FLAML) designed to automatically produce accurate machine learning models with low computational cost. It is fast and economical. The simple and lightweight design makes it easy to use and extend, such as adding new learners. FLAML can\n",
     "\n",
     " - serve as an economical AutoML engine,\n",
     " - be used as a fast hyperparameter tuning tool, or\n",

@@ -18,7 +18,7 @@
     "## 1. Introduction\n",
     "\n",
     "FLAML is a Python library (https://github.com/microsoft/FLAML) designed to automatically produce accurate machine learning models \n",
-    "with low computational cost. It is fast and cheap. The simple and lightweight design makes it easy \n",
+    "with low computational cost. It is fast and economical. The simple and lightweight design makes it easy \n",
     "to use and extend, such as adding new learners. FLAML can \n",
     "- serve as an economical AutoML engine,\n",
     "- be used as a fast hyperparameter tuning tool, or \n",

@@ -18,7 +18,7 @@
     "## 1. Introduction\n",
     "\n",
     "FLAML is a Python library (https://github.com/microsoft/FLAML) designed to automatically produce accurate machine learning models \n",
-    "with low computational cost. It is fast and cheap. The simple and lightweight design makes it easy \n",
+    "with low computational cost. It is fast and economical. The simple and lightweight design makes it easy \n",
     "to use and extend, such as adding new learners. FLAML can \n",
     "- serve as an economical AutoML engine,\n",
     "- be used as a fast hyperparameter tuning tool, or \n",

@@ -2,33 +2,34 @@
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Copyright (c) 2021. All rights reserved.\n",
|
||||
"\n",
|
||||
"Contributed by: @bnriiitb\n",
|
||||
"\n",
|
||||
"Licensed under the MIT License."
|
||||
],
|
||||
"metadata": {}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Using AutoML in Sklearn Pipeline\n",
|
||||
"\n",
|
||||
"This tutorial will help you understand how FLAML's AutoML can be used as a transformer in the Sklearn pipeline."
|
||||
],
|
||||
"metadata": {}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"\n",
|
||||
"## 1.Introduction\n",
|
||||
"\n",
|
||||
"### 1.1 FLAML - Fast and Lightweight AutoML\n",
|
||||
"\n",
|
||||
"FLAML is a Python library (https://github.com/microsoft/FLAML) designed to automatically produce accurate machine learning models with low computational cost. It is fast and cheap. The simple and lightweight design makes it easy to use and extend, such as adding new learners. \n",
|
||||
"FLAML is a Python library (https://github.com/microsoft/FLAML) designed to automatically produce accurate machine learning models with low computational cost. It is fast and economical. The simple and lightweight design makes it easy to use and extend, such as adding new learners. \n",
|
||||
"\n",
|
||||
"FLAML can \n",
|
||||
"- serve as an economical AutoML engine,\n",
|
||||
@@ -42,11 +43,11 @@
|
||||
"```bash\n",
|
||||
"pip install flaml[notebook]\n",
|
||||
"```"
|
||||
],
|
||||
"metadata": {}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 1.2 Why are pipelines a silver bullet?\n",
|
||||
"\n",
|
||||
@@ -62,47 +63,42 @@
|
||||
"* Allow hyperparameter tuning across the estimators\n",
|
||||
"* Easier to share and collaborate with multiple users (bug fixes, enhancements etc)\n",
|
||||
"* Enforce the implementation and order of steps"
|
||||
],
|
||||
"metadata": {}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### As FLAML's AutoML module can be used a transformer in the Sklearn's pipeline we can get all the benefits of pipeline and thereby write extremley clean, and resuable code."
|
||||
],
|
||||
"metadata": {}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 44,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install flaml[notebook];"
|
||||
],
|
||||
"outputs": [],
|
||||
"metadata": {}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 2. Classification Example\n",
|
||||
"### Load data and preprocess\n",
|
||||
"\n",
|
||||
"Download [Airlines dataset](https://www.openml.org/d/1169) from OpenML. The task is to predict whether a given flight will be delayed, given the information of the scheduled departure."
|
||||
],
|
||||
"metadata": {}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"source": [
|
||||
"from flaml.data import load_openml_dataset\n",
|
||||
"X_train, X_test, y_train, y_test = load_openml_dataset(\n",
|
||||
" dataset_id=1169, data_dir='./', random_state=1234, dataset_format='array')"
|
||||
],
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"output_type": "stream",
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"load dataset from ./openml_ds1169.pkl\n",
|
||||
"Dataset name: airlines\n",
|
||||
@@ -111,38 +107,62 @@
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {}
|
||||
"source": [
|
||||
"from flaml.data import load_openml_dataset\n",
|
||||
"X_train, X_test, y_train, y_test = load_openml_dataset(\n",
|
||||
" dataset_id=1169, data_dir='./', random_state=1234, dataset_format='array')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"source": [
|
||||
"X_train[0]"
|
||||
],
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"output_type": "execute_result",
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"array([ 12., 2648., 4., 15., 4., 450., 67.], dtype=float32)"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"execution_count": 5
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"metadata": {}
|
||||
"source": [
|
||||
"X_train[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 3. Create a Pipeline"
|
||||
],
|
||||
"metadata": {}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/html": [
|
||||
"<style>div.sk-top-container {color: black;background-color: white;}div.sk-toggleable {background-color: white;}label.sk-toggleable__label {cursor: pointer;display: block;width: 100%;margin-bottom: 0;padding: 0.2em 0.3em;box-sizing: border-box;text-align: center;}div.sk-toggleable__content {max-height: 0;max-width: 0;overflow: hidden;text-align: left;background-color: #f0f8ff;}div.sk-toggleable__content pre {margin: 0.2em;color: black;border-radius: 0.25em;background-color: #f0f8ff;}input.sk-toggleable__control:checked~div.sk-toggleable__content {max-height: 200px;max-width: 100%;overflow: auto;}div.sk-estimator input.sk-toggleable__control:checked~label.sk-toggleable__label {background-color: #d4ebff;}div.sk-label input.sk-toggleable__control:checked~label.sk-toggleable__label {background-color: #d4ebff;}input.sk-hidden--visually {border: 0;clip: rect(1px 1px 1px 1px);clip: rect(1px, 1px, 1px, 1px);height: 1px;margin: -1px;overflow: hidden;padding: 0;position: absolute;width: 1px;}div.sk-estimator {font-family: monospace;background-color: #f0f8ff;margin: 0.25em 0.25em;border: 1px dotted black;border-radius: 0.25em;box-sizing: border-box;}div.sk-estimator:hover {background-color: #d4ebff;}div.sk-parallel-item::after {content: \"\";width: 100%;border-bottom: 1px solid gray;flex-grow: 1;}div.sk-label:hover label.sk-toggleable__label {background-color: #d4ebff;}div.sk-serial::before {content: \"\";position: absolute;border-left: 1px solid gray;box-sizing: border-box;top: 2em;bottom: 0;left: 50%;}div.sk-serial {display: flex;flex-direction: column;align-items: center;background-color: white;}div.sk-item {z-index: 1;}div.sk-parallel {display: flex;align-items: stretch;justify-content: center;background-color: white;}div.sk-parallel-item {display: flex;flex-direction: column;position: relative;background-color: white;}div.sk-parallel-item:first-child::after {align-self: flex-end;width: 50%;}div.sk-parallel-item:last-child::after {align-self: flex-start;width: 50%;}div.sk-parallel-item:only-child::after {width: 0;}div.sk-dashed-wrapped {border: 1px dashed gray;margin: 0.2em;box-sizing: border-box;padding-bottom: 0.1em;background-color: white;position: relative;}div.sk-label label {font-family: monospace;font-weight: bold;background-color: white;display: inline-block;line-height: 1.2em;}div.sk-label-container {position: relative;z-index: 2;text-align: center;}div.sk-container {display: inline-block;position: relative;}</style><div class=\"sk-top-container\"><div class=\"sk-container\"><div class=\"sk-item sk-dashed-wrapped\"><div class=\"sk-label-container\"><div class=\"sk-label sk-toggleable\"><input class=\"sk-toggleable__control sk-hidden--visually\" id=\"b91d1bdf-ccb8-4fa5-a2d0-67a3538c0afc\" type=\"checkbox\" ><label class=\"sk-toggleable__label\" for=\"b91d1bdf-ccb8-4fa5-a2d0-67a3538c0afc\">Pipeline</label><div class=\"sk-toggleable__content\"><pre>Pipeline(steps=[('imputuer', SimpleImputer()),\n",
|
||||
" ('standardizer', StandardScaler()),\n",
|
||||
" ('automl', <flaml.automl.AutoML object at 0x7f046d56fb50>)])</pre></div></div></div><div class=\"sk-serial\"><div class=\"sk-item\"><div class=\"sk-estimator sk-toggleable\"><input class=\"sk-toggleable__control sk-hidden--visually\" id=\"a8311733-9e55-4c0c-9c2a-6b9ba6227596\" type=\"checkbox\" ><label class=\"sk-toggleable__label\" for=\"a8311733-9e55-4c0c-9c2a-6b9ba6227596\">SimpleImputer</label><div class=\"sk-toggleable__content\"><pre>SimpleImputer()</pre></div></div></div><div class=\"sk-item\"><div class=\"sk-estimator sk-toggleable\"><input class=\"sk-toggleable__control sk-hidden--visually\" id=\"52580e54-89ab-4fb7-83a1-ae13962854bb\" type=\"checkbox\" ><label class=\"sk-toggleable__label\" for=\"52580e54-89ab-4fb7-83a1-ae13962854bb\">StandardScaler</label><div class=\"sk-toggleable__content\"><pre>StandardScaler()</pre></div></div></div><div class=\"sk-item\"><div class=\"sk-estimator sk-toggleable\"><input class=\"sk-toggleable__control sk-hidden--visually\" id=\"b9fe5397-bf24-491d-a938-c39a780e1ac0\" type=\"checkbox\" ><label class=\"sk-toggleable__label\" for=\"b9fe5397-bf24-491d-a938-c39a780e1ac0\">AutoML</label><div class=\"sk-toggleable__content\"><pre><flaml.automl.AutoML object at 0x7f046d56fb50></pre></div></div></div></div></div></div></div>"
],
"text/plain": [
"Pipeline(steps=[('imputuer', SimpleImputer()),\n",
" ('standardizer', StandardScaler()),\n",
" ('automl', <flaml.automl.AutoML object at 0x7f046d56fb50>)])"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import sklearn\n",
"from sklearn import set_config\n",
@@ -163,39 +183,21 @@
" (\"automl\", automl)\n",
"])\n",
"automl_pipeline"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Run FLAML\n",
"In the FLAML AutoML run configuration, users can specify the task type, time budget, error metric, learner list, whether to subsample, resampling strategy type, and so on. All these arguments have default values which will be used if users do not provide them. For example, the default ML learners of FLAML are `['lgbm', 'xgboost', 'catboost', 'rf', 'extra_tree', 'lrl1']`."
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"settings = {\n",
" \"time_budget\": 60, # total running time in seconds\n",
@@ -204,24 +206,16 @@
" \"estimator_list\":['xgboost','catboost','lgbm'],\n",
" \"log_file_name\": 'airlines_experiment.log', # flaml log file\n",
"}"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"[flaml.automl: 08-22 21:32:13] {1130} INFO - Evaluation method: holdout\n",
"[flaml.automl: 08-22 21:32:14] {624} INFO - Using StratifiedKFold\n",
@@ -389,28 +383,47 @@
]
},
{
"data": {
"text/html": [
"<style>div.sk-top-container {color: black;background-color: white;}div.sk-toggleable {background-color: white;}label.sk-toggleable__label {cursor: pointer;display: block;width: 100%;margin-bottom: 0;padding: 0.2em 0.3em;box-sizing: border-box;text-align: center;}div.sk-toggleable__content {max-height: 0;max-width: 0;overflow: hidden;text-align: left;background-color: #f0f8ff;}div.sk-toggleable__content pre {margin: 0.2em;color: black;border-radius: 0.25em;background-color: #f0f8ff;}input.sk-toggleable__control:checked~div.sk-toggleable__content {max-height: 200px;max-width: 100%;overflow: auto;}div.sk-estimator input.sk-toggleable__control:checked~label.sk-toggleable__label {background-color: #d4ebff;}div.sk-label input.sk-toggleable__control:checked~label.sk-toggleable__label {background-color: #d4ebff;}input.sk-hidden--visually {border: 0;clip: rect(1px 1px 1px 1px);clip: rect(1px, 1px, 1px, 1px);height: 1px;margin: -1px;overflow: hidden;padding: 0;position: absolute;width: 1px;}div.sk-estimator {font-family: monospace;background-color: #f0f8ff;margin: 0.25em 0.25em;border: 1px dotted black;border-radius: 0.25em;box-sizing: border-box;}div.sk-estimator:hover {background-color: #d4ebff;}div.sk-parallel-item::after {content: \"\";width: 100%;border-bottom: 1px solid gray;flex-grow: 1;}div.sk-label:hover label.sk-toggleable__label {background-color: #d4ebff;}div.sk-serial::before {content: \"\";position: absolute;border-left: 1px solid gray;box-sizing: border-box;top: 2em;bottom: 0;left: 50%;}div.sk-serial {display: flex;flex-direction: column;align-items: center;background-color: white;}div.sk-item {z-index: 1;}div.sk-parallel {display: flex;align-items: stretch;justify-content: center;background-color: white;}div.sk-parallel-item {display: flex;flex-direction: column;position: relative;background-color: white;}div.sk-parallel-item:first-child::after {align-self: flex-end;width: 50%;}div.sk-parallel-item:last-child::after {align-self: flex-start;width: 50%;}div.sk-parallel-item:only-child::after {width: 0;}div.sk-dashed-wrapped {border: 1px dashed gray;margin: 0.2em;box-sizing: border-box;padding-bottom: 0.1em;background-color: white;position: relative;}div.sk-label label {font-family: monospace;font-weight: bold;background-color: white;display: inline-block;line-height: 1.2em;}div.sk-label-container {position: relative;z-index: 2;text-align: center;}div.sk-container {display: inline-block;position: relative;}</style><div class=\"sk-top-container\"><div class=\"sk-container\"><div class=\"sk-item sk-dashed-wrapped\"><div class=\"sk-label-container\"><div class=\"sk-label sk-toggleable\"><input class=\"sk-toggleable__control sk-hidden--visually\" id=\"b994edf1-5e76-4cd3-b719-4a204af673dc\" type=\"checkbox\" ><label class=\"sk-toggleable__label\" for=\"b994edf1-5e76-4cd3-b719-4a204af673dc\">Pipeline</label><div class=\"sk-toggleable__content\"><pre>Pipeline(steps=[('imputuer', SimpleImputer()),\n",
" ('standardizer', StandardScaler()),\n",
" ('automl', <flaml.automl.AutoML object at 0x7f046d56fb50>)])</pre></div></div></div><div class=\"sk-serial\"><div class=\"sk-item\"><div class=\"sk-estimator sk-toggleable\"><input class=\"sk-toggleable__control sk-hidden--visually\" id=\"c94ee64a-d8b1-4cbb-aeca-952bf6963c13\" type=\"checkbox\" ><label class=\"sk-toggleable__label\" for=\"c94ee64a-d8b1-4cbb-aeca-952bf6963c13\">SimpleImputer</label><div class=\"sk-toggleable__content\"><pre>SimpleImputer()</pre></div></div></div><div class=\"sk-item\"><div class=\"sk-estimator sk-toggleable\"><input class=\"sk-toggleable__control sk-hidden--visually\" id=\"6a28d11a-19e2-4243-8b85-e3ba5f6f2a7e\" type=\"checkbox\" ><label class=\"sk-toggleable__label\" for=\"6a28d11a-19e2-4243-8b85-e3ba5f6f2a7e\">StandardScaler</label><div class=\"sk-toggleable__content\"><pre>StandardScaler()</pre></div></div></div><div class=\"sk-item\"><div class=\"sk-estimator sk-toggleable\"><input class=\"sk-toggleable__control sk-hidden--visually\" id=\"03dcbe59-a8be-4f09-a944-115d90939f81\" type=\"checkbox\" ><label class=\"sk-toggleable__label\" for=\"03dcbe59-a8be-4f09-a944-115d90939f81\">AutoML</label><div class=\"sk-toggleable__content\"><pre><flaml.automl.AutoML object at 0x7f046d56fb50></pre></div></div></div></div></div></div></div>"
],
"text/plain": [
"Pipeline(steps=[('imputuer', SimpleImputer()),\n",
" ('standardizer', StandardScaler()),\n",
" ('automl', <flaml.automl.AutoML object at 0x7f046d56fb50>)])"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"automl_pipeline.fit(X_train, y_train, \n",
" automl__time_budget=settings['time_budget'],\n",
" automl__metric=settings['metric'],\n",
" automl__estimator_list=settings['estimator_list'],\n",
" automl__log_training_metric=True)"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Best ML leaner: xgboost\n",
"Best hyperparmeter config: {'n_estimators': 63, 'max_leaves': 1797, 'min_child_weight': 0.07275175679381725, 'learning_rate': 0.06234183309508761, 'subsample': 0.9814772488195874, 'colsample_bylevel': 0.810466508891351, 'colsample_bytree': 0.8005378817953572, 'reg_alpha': 0.5768305704485758, 'reg_lambda': 6.867180836557797, 'FLAML_sample_size': 364083}\n",
"Best accuracy on validation data: 0.6721\n",
"Training duration of best run: 15.45 s\n"
]
}
],
"source": [
"# Get the automl object from the pipeline\n",
"automl = automl_pipeline.steps[2][1]\n",
@@ -420,75 +433,55 @@
"print('Best hyperparmeter config:', automl.best_config)\n",
"print('Best accuracy on validation data: {0:.4g}'.format(1-automl.best_loss))\n",
"print('Training duration of best run: {0:.4g} s'.format(automl.best_config_train_time))"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"<flaml.model.XGBoostSklearnEstimator at 0x7f03a5eada00>"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"automl.model"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 4. Persist the model binary file"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"# Persist the automl object as pickle file\n",
"import pickle\n",
"with open('automl.pkl', 'wb') as f:\n",
" pickle.dump(automl, f, pickle.HIGHEST_PROTOCOL)"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Predicted labels [0 1 1 ... 0 1 0]\n",
"True labels [0 0 0 ... 1 0 1]\n",
@@ -496,13 +489,23 @@
]
}
],
"source": [
"# Perform inference on the testing dataset\n",
"y_pred = automl_pipeline.predict(X_test)\n",
"print('Predicted labels', y_pred)\n",
"print('True labels', y_test)\n",
"y_pred_proba = automl_pipeline.predict_proba(X_test)[:,1]\n",
"print('Predicted probas ',y_pred_proba[:5])"
]
}
],
"metadata": {
"interpreter": {
"hash": "0cfea3304185a9579d09e0953576b57c8581e46e6ebc6dfeb681bc5a511f7544"
},
"kernelspec": {
"display_name": "Python 3.8.0 64-bit ('blend': conda)",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
@@ -515,11 +518,8 @@
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.0"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

@@ -11,14 +11,12 @@ def test_hf_data():
    from datasets import load_dataset

    try:
        train_dataset = (
            load_dataset("glue", "mrpc", split="train[:1%]").to_pandas().iloc[0:4]
        )
        train_dataset = load_dataset("glue", "mrpc", split="train[:1%]").to_pandas()
        dev_dataset = (
            load_dataset("glue", "mrpc", split="train[1%:2%]").to_pandas().iloc[0:4]
            load_dataset("glue", "mrpc", split="train[1%:2%]").to_pandas().iloc[:4]
        )
        test_dataset = (
            load_dataset("glue", "mrpc", split="test[1%:2%]").to_pandas().iloc[0:4]
            load_dataset("glue", "mrpc", split="test[2%:3%]").to_pandas().iloc[:4]
        )
    except requests.exceptions.ConnectionError:
        return

@@ -39,7 +37,7 @@ def test_hf_data():
    automl_settings = {
        "gpu_per_trial": 0,
        "max_iter": 3,
        "time_budget": 5,
        "time_budget": 10,
        "task": "seq-classification",
        "metric": "accuracy",
        "log_file_name": "seqclass.log",

@@ -17,7 +17,6 @@ def custom_metric(
):
    from datasets import Dataset
    from flaml.model import TransformersEstimator
    from flaml.nlp.utils import load_default_huggingface_metric_for_task

    if estimator._trainer is None:
        estimator._init_model_for_predict(X_test)

@@ -32,13 +31,12 @@ def custom_metric(
    X_test, _ = estimator._preprocess(X_test)
    eval_dataset = Dataset.from_pandas(X_test)

    estimator_metric_cache = estimator._metric
    estimator._metric = load_default_huggingface_metric_for_task(estimator._task)

    estimator_metric_backup = estimator._metric
    estimator._metric = "rmse"
    metrics = trainer.evaluate(eval_dataset)
    estimator._metric = estimator_metric_cache
    estimator._metric = estimator_metric_backup

    return metrics["eval_val_loss"], metrics
    return metrics.pop("eval_automl_metric"), metrics


@pytest.mark.skipif(sys.platform == "darwin", reason="do not run on mac os")

@@ -13,11 +13,9 @@ def test_regression():
    from datasets import load_dataset

    try:
        train_dataset = (
            load_dataset("glue", "stsb", split="train[:1%]").to_pandas().iloc[:20]
        )
        train_dataset = load_dataset("glue", "stsb", split="train[:2%]").to_pandas()
        dev_dataset = (
            load_dataset("glue", "stsb", split="train[1%:2%]").to_pandas().iloc[:20]
            load_dataset("glue", "stsb", split="train[2%:3%]").to_pandas().iloc[:32]
        )
    except requests.exceptions.ConnectionError:
        return

@@ -50,9 +48,12 @@ def test_regression():
        "fp16": False,
    }

    ray.shutdown()
    ray.init()
    automl.fit(
        X_train=X_train, y_train=y_train, X_val=X_val, y_val=y_val, **automl_settings
    )
    automl.predict(X_val)


if __name__ == "__main__":

test/test_model.py
@@ -0,0 +1,117 @@
from sklearn.datasets import make_classification
import numpy as np
from pandas import DataFrame
from datetime import datetime
from flaml.model import (
    KNeighborsEstimator,
    LRL2Classifier,
    BaseEstimator,
    LGBMEstimator,
    CatBoostEstimator,
    XGBoostEstimator,
    RandomForestEstimator,
    Prophet,
    ARIMA,
    LGBM_TS_Regressor,
)


def test_lrl2():
    BaseEstimator.search_space(1, "")
    X, y = make_classification(100000, 1000)
    print("start")
    lr = LRL2Classifier()
    lr.predict(X)
    lr.fit(X, y, budget=1e-5)


def test_prep():
    # Mixed numeric and categorical features, stored with dtype=object.
    X = np.array(
        list(
            zip(
                [3.0, 16.0, 10.0, 12.0, 3.0, 14.0, 11.0, 12.0, 5.0, 14.0, 20.0, 16.0, 15.0, 11.0],
                ["a", "b", "a", "c", "c", "b", "b", "b", "b", "a", "b", 1.0, 1.0, "a"],
            )
        ),
        dtype=object,
    )
    y = np.array([0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1])
    lr = LRL2Classifier()
    lr.fit(X, y)
    lr.predict(X)
    lgbm = LGBMEstimator(n_estimators=4)
    lgbm.fit(X, y)
    cat = CatBoostEstimator(n_estimators=4)
    cat.fit(X, y)
    knn = KNeighborsEstimator(task="regression")
    knn.fit(X, y)
    xgb = XGBoostEstimator(n_estimators=4, max_leaves=4)
    xgb.fit(X, y)
    xgb.predict(X)
    rf = RandomForestEstimator(task="regression", n_estimators=4, criterion="gini")
    rf.fit(X, y)

    prophet = Prophet()
    try:
        prophet.predict(4)
    except ValueError:
        # predict() with steps is only supported for arima/sarimax.
        pass
    prophet.predict(X)

    arima = ARIMA()
    arima.predict(X)
    arima._model = False
    try:
        arima.predict(X)
    except ValueError:
        # X_test needs to be either a pandas Dataframe with dates as the first column or an int number of periods for predict().
        pass

    lgbm = LGBM_TS_Regressor(optimize_for_horizon=True, lags=1)
    X = DataFrame(
        {
            "A": [
                datetime(1900, 2, 3),
                datetime(1900, 3, 4),
                datetime(1900, 3, 4),
                datetime(1900, 3, 4),
                datetime(1900, 7, 2),
                datetime(1900, 8, 9),
            ],
        }
    )
    y = np.array([0, 1, 0, 1, 0, 0])
    lgbm.predict(X[:2])
    lgbm.fit(X, y, period=2)
    lgbm.predict(X[:2])

test/tune/test_constraints.py
@@ -0,0 +1,25 @@
def test_config_constraint():
    from flaml import tune

    # Test dict return value
    def evaluate_config_dict(config):
        metric = (round(config["x"]) - 85000) ** 2 - config["x"] / config["y"]
        return {"metric": metric}

    def config_constraint(config):
        if config["y"] >= config["x"]:
            return 1
        else:
            return 0

    tune.run(
        evaluate_config_dict,
        config={
            "x": tune.qloguniform(lower=1, upper=100000, q=1),
            "y": tune.qrandint(lower=2, upper=100000, q=2),
        },
        config_constraints=[(config_constraint, ">", 0.5)],
        metric="metric",
        mode="max",
        num_samples=100,
    )

@@ -1,3 +1,4 @@
from time import sleep
import numpy as np

try:
@@ -12,22 +13,38 @@ except (ImportError, AssertionError):

    use_ray = False

from flaml.searcher.suggestion import OptunaSearch, Searcher, ConcurrencyLimiter
from flaml.searcher.blendsearch import BlendSearch, CFO, RandomSearch


def define_search_space(trial):
    trial.suggest_float("a", 6, 8)
    trial.suggest_float("b", 1e-4, 1e-2, log=True)


def long_define_search_space(trial):
    sleep(1)
    return 3


def wrong_define_search_space(trial):
    return {1: 1}


def test_searcher():
    from flaml.searcher.suggestion import OptunaSearch, Searcher, ConcurrencyLimiter
    from flaml.searcher.blendsearch import BlendSearch, CFO, RandomSearch
    from flaml.tune import sample as flamlsample

    searcher = Searcher()
    try:
        searcher = Searcher(metric=1, mode=1)
    except ValueError:
        # Mode must either be a list or string
        pass
    searcher = Searcher(metric=["m1", "m2"], mode=["max", "min"])
    searcher.set_search_properties(None, None, None)
    searcher.suggest = searcher.on_pause = searcher.on_unpause = lambda _: {}
    searcher.on_trial_complete = lambda trial_id, result, error: None
    searcher = ConcurrencyLimiter(searcher, max_concurrent=2, batch=True)
    searcher.on_trial_complete("t0")
    searcher.suggest("t1")
    searcher.suggest("t2")
    searcher.on_pause("t1")
@@ -43,6 +60,12 @@ def test_searcher():
        "a": optuna.distributions.UniformDistribution(6, 8),
        "b": optuna.distributions.LogUniformDistribution(1e-4, 1e-2),
    }
    searcher = OptunaSearch(["a", config["a"]], metric="m", mode="max")
    try:
        searcher.suggest("t0")
    except ValueError:
        # not enough values to unpack (expected 3, got 1)
        pass
    searcher = OptunaSearch(
        config,
        points_to_evaluate=[{"a": 6, "b": 1e-3}],
@@ -50,14 +73,105 @@ def test_searcher():
        metric="m",
        mode="max",
    )
    try:
        searcher.add_evaluated_point({}, None, error=True)
    except ValueError:
        # Inconsistent parameters set() and distributions {'b', 'a'}.
        pass
    try:
        searcher.add_evaluated_point({"a", 1, "b", 0.01}, None, pruned=True)
    except AttributeError:
        # 'set' object has no attribute 'keys'
        pass
    try:
        searcher.add_evaluated_point(
            {"a": 1, "b": 0.01}, None, intermediate_values=[0.1]
        )
    except ValueError:
        # `value` is supposed to be set for a complete trial.
        pass
    try:
        searcher = OptunaSearch(config, points_to_evaluate=1)
    except TypeError:
        # points_to_evaluate expected to be a list, got <class 'int'>
        pass
    try:
        searcher = OptunaSearch(config, points_to_evaluate=[1])
    except TypeError:
        # points_to_evaluate expected to include list or dict
        pass
    try:
        searcher = OptunaSearch(config, points_to_evaluate=[{"a": 1}])
    except ValueError:
        # Dim of point {'a': 1} and parameter_names {'a': UniformDistribution(high=8.0, low=6.0), 'b': LogUniformDistribution(high=0.01, low=0.0001)} do not match.
        pass
    try:
        searcher = OptunaSearch(
            config, points_to_evaluate=[{"a": 1, "b": 0.01}], evaluated_rewards=1
        )
    except TypeError:
        # evaluated_rewards expected to be a list, got <class 'int'>.
        pass
    try:
        searcher = OptunaSearch(
            config, points_to_evaluate=[{"a": 1, "b": 0.01}], evaluated_rewards=[1, 2]
        )
    except ValueError:
        # Dim of evaluated_rewards [1, 2] and points_to_evaluate [{'a': 1, 'b': 0.01}] do not match.
        pass
    config = {"a": sample.uniform(6, 8), "b": sample.loguniform(1e-4, 1e-2)}
    # searcher = OptunaSearch(
    #     config,
    #     points_to_evaluate=[{"a": 6, "b": 1e-3}],
    #     evaluated_rewards=[{"m": 2}],
    #     metric="m",
    #     mode="max",
    # )
    OptunaSearch.convert_search_space({"a": 1})
    try:
        OptunaSearch.convert_search_space({"a": {"grid_search": [1, 2]}})
    except ValueError:
        # Grid search parameters cannot be automatically converted to an Optuna search space.
        pass
    OptunaSearch.convert_search_space({"a": flamlsample.quniform(1, 3, 1)})
    try:
        searcher = OptunaSearch(
            config,
            points_to_evaluate=[{"a": 6, "b": 1e-3}],
            evaluated_rewards=[{"m": 2}],
            metric="m",
            mode="max",
        )
    except ValueError:
        # Optuna search does not support parameters of type `Float` with samplers of type `_Uniform`
        pass
    searcher = OptunaSearch(long_define_search_space, metric="m", mode="min")
    try:
        searcher.suggest("t0")
    except TypeError:
        # The return value of the define-by-run function passed in the `space` argument should be either None or a `dict` with `str` keys.
        pass
    searcher = OptunaSearch(wrong_define_search_space, metric="m", mode="min")
    try:
        searcher.suggest("t0")
    except TypeError:
        # At least one of the keys in the dict returned by the define-by-run function passed in the `space` argument was not a `str`.
        pass
    searcher = OptunaSearch(metric="m", mode="min")
    try:
        searcher.suggest("t0")
    except RuntimeError:
        # Trying to sample a configuration from OptunaSearch, but no search space has been defined.
        pass
    try:
        searcher.add_evaluated_point({}, 1)
    except RuntimeError:
        # Trying to sample a configuration from OptunaSearch, but no search space has been defined.
        pass
    searcher = OptunaSearch(define_search_space)
    try:
        searcher.suggest("t0")
    except RuntimeError:
        # Trying to sample a configuration from OptunaSearch, but the `metric` (None) or `mode` (None) parameters have not been set.
        pass
    try:
        searcher.add_evaluated_point({}, 1)
    except RuntimeError:
        # Trying to sample a configuration from OptunaSearch, but the `metric` (None) or `mode` (None) parameters have not been set.
        pass
    searcher = OptunaSearch(
        define_search_space,
        points_to_evaluate=[{"a": 6, "b": 1e-3}],
@@ -166,3 +280,13 @@ def test_searcher():
    from flaml import tune

    tune.run(lambda x: 1, config={}, use_ray=use_ray)


def test_no_optuna():
    import subprocess
    import sys

    subprocess.check_call([sys.executable, "-m", "pip", "uninstall", "-y", "optuna"])
    import flaml.searcher.suggestion

    subprocess.check_call([sys.executable, "-m", "pip", "install", "optuna==2.8.0"])

test/tune/test_space.py
@@ -0,0 +1,115 @@
from flaml import BlendSearch, CFO, tune


def test_define_by_run():
    from flaml.tune.space import (
        unflatten_hierarchical,
        normalize,
        indexof,
        complete_config,
    )

    space = {
        # Sample a float uniformly between -5.0 and -1.0
        "uniform": tune.uniform(-5, -1),
        # Sample a float uniformly between 3.2 and 5.4,
        # rounding to increments of 0.2
        "quniform": tune.quniform(3.2, 5.4, 0.2),
        # Sample a float uniformly between 0.0001 and 0.01, while
        # sampling in log space
        "loguniform": tune.loguniform(1e-4, 1e-2),
        # Sample a float uniformly between 0.0001 and 0.1, while
        # sampling in log space and rounding to increments of 0.00005
        "qloguniform": tune.qloguniform(1e-4, 1e-1, 5e-5),
        # Sample a random float from a normal distribution with
        # mean=10 and sd=2
        # "randn": tune.randn(10, 2),
        # Sample a random float from a normal distribution with
        # mean=10 and sd=2, rounding to increments of 0.2
        # "qrandn": tune.qrandn(10, 2, 0.2),
        # Sample an integer uniformly between -9 (inclusive) and 15 (exclusive)
        "randint": tune.randint(-9, 15),
        # Sample an integer uniformly between -21 (inclusive) and 12 (inclusive (!)),
        # rounding to increments of 3 (includes 12)
        "qrandint": tune.qrandint(-21, 12, 3),
        # Sample an integer uniformly between 1 (inclusive) and 10 (exclusive),
        # while sampling in log space
        "lograndint": tune.lograndint(1, 10),
        # Sample an integer uniformly between 2 (inclusive) and 10 (inclusive (!)),
        # while sampling in log space and rounding to increments of 2
        "qlograndint": tune.qlograndint(2, 10, 2),
        # Sample an option uniformly from the specified choices
        "choice": tune.choice(["a", "b", "c"]),
        "const": 5,
    }
    choice = {"nested": space}
    bs = BlendSearch(
        space={"c": tune.choice([choice])},
        low_cost_partial_config={"c": choice},
        metric="metric",
        mode="max",
    )
    print(indexof(bs._gs.space["c"], choice))
    print(indexof(bs._gs.space["c"], {"nested": {"const": 1}}))
    config = bs._gs.suggest("t1")
    print(config)
    config = unflatten_hierarchical(config, bs._gs.space)[0]
    print(config)
    print(normalize({"c": [choice]}, bs._gs.space, config, {}, False))
    space["randn"] = tune.randn(10, 2)
    cfo = CFO(
        space={"c": tune.choice([0, choice])},
        metric="metric",
        mode="max",
    )
    for i in range(5):
        cfo.suggest(f"t{i}")
    # print(normalize(config, bs._gs.space, config, {}, False))
    print(complete_config({}, cfo._ls.space, cfo._ls))


def test_grid():
    from flaml.searcher.variant_generator import (
        generate_variants,
        grid_search,
        TuneError,
        has_unresolved_values,
    )
    from flaml.tune import sample

    space = {
        "activation": grid_search(["relu", "tanh"]),
        "learning_rate": grid_search([1e-3, 1e-4, 1e-5]),
        "c": sample.choice([2, 3]),
    }
    for _, generated in generate_variants({"config": space}):
        config = generated["config"]
        print(config)
    for _, generated in generate_variants({"config": space}, True):
        config = generated["config"]
        print(config)
    space = {
        "activation": grid_search([{"c": sample.choice([2, 3])}]),
        "learning_rate": grid_search([1e-3, 1e-4, 1e-5]),
    }
    try:
        for _, generated in generate_variants({"config": space}, True):
            config = generated["config"]
            print(config)
    except ValueError:
        # The variable `('config', 'activation', 'c')` could not be unambiguously resolved to a single value.
        pass
    space = {
        "c": sample.choice([{"c1": sample.choice([1, 2])}]),
        "a": sample.randint(1, 10),
        "b": sample.choice([sample.uniform(10, 20), sample.choice([1, 2])]),
    }
    for _, generated in generate_variants({"config": space}):
        config = generated["config"]
        print(config)
    space = {"a": grid_search(3)}
    try:
        print(has_unresolved_values(space))
    except TuneError:
        # Grid search expected list of values, got: 3
        pass

@@ -80,8 +80,8 @@ nvm install --lts
Then:

```console
npm install --global yarn
pip install pydoc-markdown
npm install --global yarn # skip if you use the dev container we provided
pip install pydoc-markdown # skip if you use the dev container we provided
cd website
yarn install --frozen-lockfile
pydoc-markdown

@@ -2,7 +2,7 @@

## Overview

`flaml.AutoML` is a class for task-oriented AutoML. It can be used as a scikit-learn style estimator with the standard `fit` and `predict` functions. The minimal inputs from users are the training data and the task type.
[`flaml.AutoML`](../reference/automl#automl-objects) is a class for task-oriented AutoML. It can be used as a scikit-learn style estimator with the standard `fit` and `predict` functions. The minimal inputs from users are the training data and the task type (a minimal usage sketch follows the list below).

* Training data:
- numpy array. When the input data are stored in numpy array, they are passed to `fit()` as `X_train` and `y_train`.
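
A minimal usage sketch of the above (an editorial illustration, not part of the diff; it uses synthetic numpy data so the snippet is self-contained, and only the `task` and `time_budget` arguments beyond the training data):

```python
from sklearn.datasets import make_classification
from flaml import AutoML

# Synthetic training data passed as X_train / y_train numpy arrays.
X_train, y_train = make_classification(n_samples=1000, n_features=20, random_state=0)

automl = AutoML()
# task and time_budget are the minimal knobs; everything else has defaults.
automl.fit(X_train=X_train, y_train=y_train, task="classification", time_budget=10)
print(automl.predict(X_train[:5]))
```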
@@ -122,7 +122,7 @@ The estimator list can contain one or more estimator names, each corresponding t

To tune a custom estimator that is not built-in, you need to:

1. Build a custom estimator by inheriting `flaml.model.BaseEstimator` or a derived class.
1. Build a custom estimator by inheriting [`flaml.model.BaseEstimator`](../reference/model#baseestimator-objects) or a derived class.
For example, if you have an estimator class with scikit-learn style `fit()` and `predict()` functions, you only need to set `self.estimator_class` to be that class in your constructor.

```python
@@ -163,7 +163,7 @@ class MyRegularizedGreedyForest(SKLearnEstimator):
        return space
```

In the constructor, we set `self.estimator_class` as `RGFClassifier` or `RGFRegressor` according to the task type. If the estimator you want to tune does not have a scikit-learn style `fit()` and `predict()` API, you can override the `fit()` and `predict()` function of `flaml.model.BaseEstimator`, like [XGBoostEstimator](https://github.com/microsoft/FLAML/blob/59083fbdcb95c15819a0063a355969203022271c/flaml/model.py#L511).
In the constructor, we set `self.estimator_class` as `RGFClassifier` or `RGFRegressor` according to the task type. If the estimator you want to tune does not have a scikit-learn style `fit()` and `predict()` API, you can override the `fit()` and `predict()` function of `flaml.model.BaseEstimator`, like [XGBoostEstimator](../reference/model#xgboostestimator-objects).

2. Give the custom estimator a name and add it in AutoML. E.g.,

@@ -377,7 +377,7 @@ print(automl.model)
# <flaml.model.LGBMEstimator object at 0x7f9b502c4550>
```

`flaml.model.LGBMEstimator` is a wrapper class for LightGBM models. To access the underlying model, use the `estimator` property of the `flaml.model.LGBMEstimator` instance.
[`flaml.model.LGBMEstimator`](../reference/model#lgbmestimator-objects) is a wrapper class for LightGBM models. To access the underlying model, use the `estimator` property of the `flaml.model.LGBMEstimator` instance.

```python
print(automl.model.estimator)

@@ -1,6 +1,6 @@
# Tune User Defined Function

`flaml.tune` is a module for economical hyperparameter tuning. It is used internally by `flaml.AutoML`. It can also be used to directly tune a user-defined function (UDF), which is not limited to machine learning model training. You can use `flaml.tune` instead of `flaml.AutoML` if one of the following is true:
[`flaml.tune`](../reference/tune/tune) is a module for economical hyperparameter tuning. It is used internally by `flaml.AutoML`. It can also be used to directly tune a user-defined function (UDF), which is not limited to machine learning model training. You can use `flaml.tune` instead of `flaml.AutoML` if one of the following is true:

1. Your machine learning task is not one of the built-in tasks from `flaml.AutoML`.
1. Your input cannot be represented as X_train + y_train or dataframe + label.
@@ -75,8 +75,8 @@ config_search_space = {
    "y": tune.randint(lower=1, upper=100000)
}

# provide the search space to flaml.tune
flaml.tune.run(..., config=config_search_space, ...)
# provide the search space to tune.run
tune.run(..., config=config_search_space, ...)
```

#### More details about the search space domain
@@ -121,9 +121,9 @@ config = {
    # while sampling in log space
    "lograndint": tune.lograndint(1, 10),

    # Sample an integer uniformly between 1 (inclusive) and 10 (inclusive (!)),
    # Sample an integer uniformly between 2 (inclusive) and 10 (inclusive (!)),
    # while sampling in log space and rounding to increments of 2
    "qlograndint": tune.qlograndint(1, 10, 2),
    "qlograndint": tune.qlograndint(2, 10, 2),

    # Sample an option uniformly from the specified choices
    "choice": tune.choice(["a", "b", "c"]),
@@ -170,7 +170,7 @@ Optionally, you can provide a list of config constraints to be satisfied through


### Put together
After the aforementioned key steps, one is ready to perform a tuning task by calling `flaml.tune.run()`. Below is a quick sequential tuning example using the pre-defined search space `config_search_space` and a minimization (`mode='min'`) objective for the `score` metric evaluated in `evaluate_config`, using the default search algorithm in flaml. The time budget is 10 seconds (`time_budget_s=10`).
After the aforementioned key steps, one is ready to perform a tuning task by calling [`flaml.tune.run()`](../reference/tune/tune#run). Below is a quick sequential tuning example using the pre-defined search space `config_search_space` and a minimization (`mode='min'`) objective for the `score` metric evaluated in `evaluate_config`, using the default search algorithm in flaml. The time budget is 10 seconds (`time_budget_s=10`).
```python
# require: pip install flaml[blendsearch]
analysis = tune.run(
@@ -209,7 +209,7 @@ There are several advanced tuning options worth mentioning.

### More constraints on the tuning

A user can specify constraints on the configurations to be satisfied via the argument `config_constraints`. The `config_constraints` receives a list of such constraints to be satisfied. Specifically, each constraint is a tuple that consists of (1) a function that takes a configuration as input and returns a numerical value; (2) an operation chosen from "<=" or ">"; (3) a numerical threshold.
A user can specify constraints on the configurations to be satisfied via the argument `config_constraints`. The `config_constraints` receives a list of such constraints to be satisfied. Specifically, each constraint is a tuple that consists of (1) a function that takes a configuration as input and returns a numerical value; (2) an operation chosen from "<=", ">=", "<" or ">"; (3) a numerical threshold.

In the following code example, we constrain the output of `area`, which takes a configuration as input and outputs a numerical value, to be no larger than 1000.

@@ -222,7 +222,7 @@ flaml.tune.run(evaluation_function=evaluate_config, mode="min",
               config_constraints=[(area, "<=", 1000)], ...)
```

You can also specify a list of metric constraints to be satisfied via the argument `metric_constraints`. Each element in the `metric_constraints` list is a tuple that consists of (1) a string specifying the name of the metric (the metric name must be defined and returned in the user-defined `evaluation_function`); (2) an operation chosen from "<=" or ">"; (3) a numerical threshold.
You can also specify a list of metric constraints to be satisfied via the argument `metric_constraints`. Each element in the `metric_constraints` list is a tuple that consists of (1) a string specifying the name of the metric (the metric name must be defined and returned in the user-defined `evaluation_function`); (2) an operation chosen from "<=" or ">="; (3) a numerical threshold.

In the following code example, we constrain the metric `score` to be no larger than 0.4.
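
The example itself is cut off by the diff; a minimal sketch of what it would look like, mirroring the `config_constraints` snippet above (it assumes the same `evaluate_config` function defined earlier on this page, returning a "score" entry):

```python
flaml.tune.run(evaluation_function=evaluate_config, mode="min",
               metric_constraints=[("score", "<=", 0.4)], ...)
```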
@@ -1 +1 @@
<svg width="747" height="458" viewBox="0 0 747 458" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" overflow="hidden"><defs><clipPath id="clip0"><rect x="591" y="770" width="747" height="458"/></clipPath></defs><g clip-path="url(#clip0)" transform="translate(-591 -770)"><path d="M731.5 997C731.5 872.46 835.818 771.5 964.5 771.5 1093.18 771.5 1197.5 872.46 1197.5 997 1197.5 1121.54 1093.18 1222.5 964.5 1222.5 835.818 1222.5 731.5 1121.54 731.5 997Z" stroke="#767171" stroke-width="3.4375" stroke-linecap="butt" stroke-linejoin="miter" stroke-miterlimit="8" stroke-opacity="1" fill="none" fill-rule="evenodd"/><path d="M964 865.5 1330.5 986.5 964 1107.5 597.5 986.5Z" fill="#FFFFFF" fill-rule="evenodd" fill-opacity="1"/><path d="M964 865.5 964 1107.5" stroke="#404040" stroke-width="3.4375" stroke-linecap="butt" stroke-linejoin="miter" stroke-miterlimit="8" stroke-opacity="1" fill="none" fill-rule="evenodd"/><path d="M964 865.5 1330.5 986.5 964 1107.5 597.5 986.5Z" stroke="#404040" stroke-width="3.4375" stroke-linecap="butt" stroke-linejoin="miter" stroke-miterlimit="8" stroke-opacity="1" fill="none" fill-rule="evenodd"/><path d="M964 867 964 1105 602 986Z" fill="#D0CECE" fill-rule="evenodd" fill-opacity="1"/><path d="M873.5 986.5C873.5 939.004 913.123 900.5 962 900.5 1010.88 900.5 1050.5 939.004 1050.5 986.5 1050.5 1034 1010.88 1072.5 962 1072.5 913.123 1072.5 873.5 1034 873.5 986.5Z" stroke="#404040" stroke-width="3.4375" stroke-linecap="butt" stroke-linejoin="miter" stroke-miterlimit="8" stroke-opacity="1" fill="none" fill-rule="evenodd"/></g></svg>
<svg width="556" height="557" viewBox="0 0 556 557" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" overflow="hidden"><defs><clipPath id="clip0"><rect x="68" y="81" width="556" height="557"/></clipPath><clipPath id="clip1"><rect x="68" y="82" width="555" height="556"/></clipPath><clipPath id="clip2"><rect x="68" y="82" width="555" height="556"/></clipPath><clipPath id="clip3"><rect x="68" y="82" width="555" height="556"/></clipPath></defs><g clip-path="url(#clip0)" transform="translate(-68 -81)"><g clip-path="url(#clip1)"><g clip-path="url(#clip2)"><g clip-path="url(#clip3)"><path d="M185 184.347 185 112.867C185 88.8439 204.475 69.3692 228.498 69.3692 252.522 69.3692 271.996 88.8439 271.996 112.867L271.996 184.168C311.523 160.063 324.025 108.479 299.92 68.9524 275.815 29.4254 224.232 16.9235 184.705 41.0284 145.178 65.1333 132.676 116.717 156.781 156.243 163.793 167.742 173.473 177.382 185 184.347Z" stroke="#000000" stroke-width="8" stroke-linecap="butt" stroke-linejoin="miter" stroke-miterlimit="4" stroke-opacity="1" fill="#FFFFFF" fill-rule="nonzero" fill-opacity="1" transform="matrix(1 0 0 1.0018 68 82)"/><path d="M202.344 112.873 202.344 359.68 202.28 359.68 180.82 263.047C177.513 248.986 163.433 240.269 149.372 243.576 135.558 246.825 126.855 260.499 129.76 274.39L161.147 415.62C162.481 421.599 165.874 426.919 170.732 430.651L228.544 475.109 228.544 514.531 380.21 514.531 380.21 488.377C380.21 451.325 422.054 448.486 422.054 378.533L422.054 284.38C422.072 267.068 408.052 253.019 390.74 253.002 382.921 252.994 375.382 255.909 369.601 261.174 369.688 260.202 369.746 259.225 369.746 258.231 369.757 240.91 355.723 226.859 338.402 226.848 330.29 226.844 322.492 229.981 316.645 235.603 312.833 218.704 296.042 208.094 279.142 211.906 264.823 215.136 254.659 227.863 254.676 242.541L254.676 112.873C254.676 98.4287 242.966 86.7188 228.521 86.7188 214.077 86.7188 202.367 98.4287 202.367 112.873Z" stroke="#000000" stroke-width="8" stroke-linecap="butt" stroke-linejoin="miter" stroke-miterlimit="4" stroke-opacity="1" fill="#FFFFFF" fill-rule="nonzero" fill-opacity="1" transform="matrix(1 0 0 1.0018 68 82)"/></g></g></g></g></svg>
@@ -1 +1 @@
<svg width="1050" height="1018" viewBox="0 0 1050 1018" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" overflow="hidden"><defs><clipPath id="clip0"><rect x="1732" y="462" width="1050" height="1018"/></clipPath></defs><g clip-path="url(#clip0)" transform="translate(-1732 -462)"><path d="M2625.69 1327.29 2384 975" stroke="#767171" stroke-width="10.3125" stroke-linecap="butt" stroke-linejoin="miter" stroke-miterlimit="8" stroke-opacity="1" fill="none" fill-rule="evenodd"/><path d="M1742 973C1742 695.753 1973.69 471 2259.5 471 2545.31 471 2777 695.753 2777 973 2777 1250.25 2545.31 1475 2259.5 1475 1973.69 1475 1742 1250.25 1742 973Z" stroke="#767171" stroke-width="10.3125" stroke-linecap="butt" stroke-linejoin="miter" stroke-miterlimit="8" stroke-opacity="1" fill="none" fill-rule="evenodd"/><path d="M2027 781.5C2027 656.96 2131.32 556 2260 556 2388.68 556 2493 656.96 2493 781.5 2493 906.04 2388.68 1007 2260 1007 2131.32 1007 2027 906.04 2027 781.5Z" stroke="#767171" stroke-width="10.3125" stroke-linecap="butt" stroke-linejoin="miter" stroke-miterlimit="8" stroke-opacity="1" fill="none" fill-rule="evenodd"/><path d="M0 0 248.988 341.047" stroke="#767171" stroke-width="10.3125" stroke-linecap="butt" stroke-linejoin="miter" stroke-miterlimit="8" stroke-opacity="1" fill="none" fill-rule="evenodd" transform="matrix(1 0 0 -1 1893 1328.05)"/><path d="M0 0 101.611 0.379659" stroke="#767171" stroke-width="10.3125" stroke-linecap="butt" stroke-linejoin="miter" stroke-miterlimit="8" stroke-opacity="1" fill="none" fill-rule="evenodd" transform="matrix(1 0 0 -1 1792 771.38)"/><path d="M2626 771 2730.12 771.379" stroke="#767171" stroke-width="10.3125" stroke-linecap="butt" stroke-linejoin="miter" stroke-miterlimit="8" stroke-opacity="1" fill="none" fill-rule="evenodd"/><path d="M2259.5 650 2626 771 2259.5 892 1893 771Z" fill="#FFFFFF" fill-rule="evenodd" fill-opacity="1"/><path d="M2259.5 650 2259.5 892" stroke="#404040" stroke-width="10.3125" stroke-linecap="butt" stroke-linejoin="miter" stroke-miterlimit="8" stroke-opacity="1" fill="none" fill-rule="evenodd"/><path d="M2259.5 650 2626 771 2259.5 892 1893 771Z" stroke="#404040" stroke-width="10.3125" stroke-linecap="butt" stroke-linejoin="miter" stroke-miterlimit="8" stroke-opacity="1" fill="none" fill-rule="evenodd"/><path d="M2255 658 2255 883 1914 770.5Z" fill="#D0CECE" fill-rule="evenodd" fill-opacity="1"/><path d="M2168 771C2168 723.504 2207.85 685 2257 685 2306.15 685 2346 723.504 2346 771 2346 818.497 2306.15 857 2257 857 2207.85 857 2168 818.497 2168 771Z" stroke="#404040" stroke-width="10.3125" stroke-linecap="butt" stroke-linejoin="miter" stroke-miterlimit="8" stroke-opacity="1" fill="none" fill-rule="evenodd"/></g></svg>
<svg width="557" height="557" viewBox="0 0 557 557" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" overflow="hidden"><defs><clipPath id="clip0"><rect x="160" y="92" width="557" height="557"/></clipPath><clipPath id="clip1"><rect x="161" y="93" width="556" height="556"/></clipPath><clipPath id="clip2"><rect x="161" y="93" width="556" height="556"/></clipPath><clipPath id="clip3"><rect x="161" y="93" width="556" height="556"/></clipPath></defs><g clip-path="url(#clip0)" transform="translate(-160 -92)"><g clip-path="url(#clip1)"><g clip-path="url(#clip2)"><g clip-path="url(#clip3)"><path d="M504.446 309.029C480.121 309.029 461.008 289.337 461.008 265.592 461.008 241.846 480.7 222.154 504.446 222.154 528.771 222.154 547.883 241.846 547.883 265.592 547.883 289.337 528.192 309.029 504.446 309.029ZM602.325 238.371C600.008 230.262 597.112 222.733 593.058 215.783L602.325 188.562 581.475 167.712 554.254 176.979C547.304 172.925 539.775 170.029 531.667 167.712L518.925 142.229 489.967 142.229 477.225 167.712C469.117 170.029 461.587 172.925 454.637 176.979L427.417 167.712 406.567 188.562 415.833 215.783C411.779 222.733 408.883 230.262 406.567 238.371L381.083 251.112 381.083 280.071 406.567 292.812C408.883 300.921 411.779 308.45 415.833 315.4L406.567 342.621 426.837 362.892 454.058 353.625C461.008 357.679 468.537 360.575 476.646 362.892L489.387 388.375 518.346 388.375 531.088 362.892C539.196 360.575 546.725 357.679 553.675 353.625L580.896 362.892 601.746 342.621 592.479 315.4C596.533 308.45 600.008 300.342 602.325 292.812L627.808 280.071 627.808 251.112 602.325 238.371Z" stroke="#000000" stroke-width="8.01441" stroke-linecap="butt" stroke-linejoin="miter" stroke-miterlimit="4" stroke-opacity="1" fill="#FFFFFF" fill-rule="nonzero" fill-opacity="1"/><path d="M373.554 519.846C349.229 519.846 330.117 500.154 330.117 476.408 330.117 452.083 349.808 432.971 373.554 432.971 397.879 432.971 416.992 452.662 416.992 476.408 416.992 500.154 397.879 519.846 373.554 519.846L373.554 519.846ZM462.167 426.6 471.433 399.379 450.583 378.529 423.362 387.796C416.412 383.742 408.304 380.846 400.775 378.529L388.033 353.046 359.075 353.046 346.333 378.529C338.225 380.846 330.696 383.742 323.746 387.796L296.525 378.529 276.254 398.8 284.942 426.021C280.888 432.971 277.992 441.079 275.675 448.608L250.192 461.35 250.192 490.308 275.675 503.05C277.992 511.158 280.888 518.688 284.942 525.637L276.254 552.858 296.525 573.129 323.746 564.442C330.696 568.496 338.225 571.392 346.333 573.708L359.075 599.192 388.033 599.192 400.775 573.708C408.883 571.392 416.412 568.496 423.362 564.442L450.583 573.708 470.854 552.858 462.167 526.217C466.221 519.267 469.117 511.738 471.433 503.629L496.917 490.887 496.917 461.929 471.433 449.188C469.117 441.079 466.221 433.55 462.167 426.6Z" stroke="#000000" stroke-width="8.01441" stroke-linecap="butt" stroke-linejoin="miter" stroke-miterlimit="4" stroke-opacity="1" fill="#FFFFFF" fill-rule="nonzero" fill-opacity="1"/></g></g></g></g></svg>
@@ -1 +1 @@
<svg width="200" height="235" viewBox="0 0 200 235" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" overflow="hidden"><defs><clipPath id="clip0"><rect x="948" y="165" width="200" height="235"/></clipPath><clipPath id="clip1"><rect x="948" y="166" width="200" height="234"/></clipPath><clipPath id="clip2"><rect x="948" y="166" width="200" height="234"/></clipPath><clipPath id="clip3"><rect x="948" y="166" width="200" height="234"/></clipPath></defs><g clip-path="url(#clip0)" transform="translate(-948 -165)"><g clip-path="url(#clip1)"><g clip-path="url(#clip2)"><g clip-path="url(#clip3)"><path d="M70.8333 185.417 93.75 108.333 58.3333 108.333 75.875 15.0833 128.292 15.0833 106.25 83.3333 141.667 83.3333 70.8333 185.417Z" stroke="#767171" stroke-width="2" stroke-linecap="butt" stroke-linejoin="miter" stroke-miterlimit="4" stroke-opacity="1" fill="#FFFFFF" fill-rule="nonzero" fill-opacity="1" transform="matrix(1 0 0 1.17 948 166)"/></g></g></g></g></svg>
<svg width="200" height="235" viewBox="0 0 200 235" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" overflow="hidden"><defs><clipPath id="clip0"><rect x="948" y="165" width="200" height="235"/></clipPath><clipPath id="clip1"><rect x="948" y="166" width="200" height="234"/></clipPath><clipPath id="clip2"><rect x="948" y="166" width="200" height="234"/></clipPath><clipPath id="clip3"><rect x="948" y="166" width="200" height="234"/></clipPath></defs><g clip-path="url(#clip0)" transform="translate(-948 -165)"><g clip-path="url(#clip1)"><g clip-path="url(#clip2)"><g clip-path="url(#clip3)"><path d="M70.8333 185.417 93.75 108.333 58.3333 108.333 75.875 15.0833 128.292 15.0833 106.25 83.3333 141.667 83.3333 70.8333 185.417Z" stroke="#000000" stroke-width="3" stroke-linecap="butt" stroke-linejoin="miter" stroke-miterlimit="4" stroke-opacity="1" fill="#FFFFFF" fill-rule="nonzero" fill-opacity="1" transform="matrix(1 0 0 1.17 948 166)"/></g></g></g></g></svg>

website/static/img/flaml.svg
@@ -0,0 +1 @@
<svg id="Layer_1" data-name="Layer 1" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 1138.16 450.71"><defs><style>.cls-1{fill:#ff9406;}.cls-2{fill:#505d66;}</style></defs><g id="Layer_2" data-name="Layer 2"><g id="图层_1" data-name="图层 1"><path class="cls-1" d="M298,76.7,173.81.24a1.58,1.58,0,0,0-2.06,2.35L211.55,51a263.57,263.57,0,0,0-33.37,3.9,252.77,252.77,0,0,0-35,9,240.65,240.65,0,0,0-33,13.92,228.41,228.41,0,0,0-30.5,18.8,211.86,211.86,0,0,0-29,25.52,191.34,191.34,0,0,0-23,29.72,176.8,176.8,0,0,0-16.34,33.49,172.22,172.22,0,0,0-8.9,36.76L0,241a1.58,1.58,0,0,0,1.37,1.77,1.6,1.6,0,0,0,1-.22l79-47.9a1.55,1.55,0,0,0,.69-.86l1-3.16a145,145,0,0,1,26.41-47.86,170.28,170.28,0,0,1,41.5-36A196.9,196.9,0,0,1,203.12,84.1a214.83,214.83,0,0,1,59.32-7.55c2.86,0,5.77.12,8.63.27s5.76.34,8.62.6,5.75.57,8.6.93,5.71.78,8.54,1.25a1.58,1.58,0,0,0,1.91-1.16A1.59,1.59,0,0,0,298,76.7Z" transform="translate(0.01 0.01)"/><path class="cls-1" d="M347.83,177.83l-1-18.05a1.57,1.57,0,0,0-1.65-1.48,1.49,1.49,0,0,0-.79.26l-71.16,47.15a1.55,1.55,0,0,0-.67,1L271.9,210a143.76,143.76,0,0,1-22.58,52.27,174.42,174.42,0,0,1-42.61,43,205,205,0,0,1-58.31,28.88,217.42,217.42,0,0,1-68.28,9.93c-3.3-.05-6.63-.17-9.89-.36s-6.58-.47-9.83-.8-6.51-.76-9.73-1.24-6.42-1.05-9.6-1.68a1.57,1.57,0,0,0-1.3,2.76L171.15,450.34a1.57,1.57,0,0,0,2.39-1.94l-36.87-70.52a264,264,0,0,0,40.57-5.5A251.22,251.22,0,0,0,217.75,360a238,238,0,0,0,36.9-18.61,224.15,224.15,0,0,0,32.27-24.25,201.9,201.9,0,0,0,28.2-31.37,179.69,179.69,0,0,0,19.59-34.43A167,167,0,0,0,345.6,215,161.86,161.86,0,0,0,347.83,177.83Z" transform="translate(0.01 0.01)"/><path class="cls-2" d="M258.56,209.79,196.9,181.24l61.42-95.48a1.63,1.63,0,0,0-2.23-2.26L101.25,179.84a1.63,1.63,0,0,0-.52,2.25,1.56,1.56,0,0,0,.67.6l60.26,29.12-90.33,122a1.62,1.62,0,0,0,.13,2.09,1.6,1.6,0,0,0,2.08.23l185.24-123.5a1.63,1.63,0,0,0,.46-2.26,1.67,1.67,0,0,0-.68-.58Z" transform="translate(0.01 0.01)"/><path class="cls-2" d="M451.86,199a36.63,36.63,0,0,0-12.3,10.44,32.45,32.45,0,0,0-6.35,14.49l-4.09,24.49h104.4l-4,24H425.12l-8.88,53.25H380.3l3-17.79,14-84a60.11,60.11,0,0,1,11.49-26.51,67.08,67.08,0,0,1,22.41-19.21,58.25,58.25,0,0,1,27.69-7.07H546.4l-4,24H466.76A31.6,31.6,0,0,0,451.86,199Z" transform="translate(0.01 0.01)"/><path class="cls-2" d="M772.68,325.65,742.8,208.39l-46.3,78,60.5,2.68L637.67,325.65,729,171.09h39.76l39.83,154.56Z" transform="translate(0.01 0.01)"/><path class="cls-2" d="M643.32,301.39H597.81q-11.91,0-18.91-8.43t-5-20.33l16.93-101.54H554.61L537.69,272.63q-2.4,14.39,2.65,26.51a41.67,41.67,0,0,0,16.11,19.32,45.49,45.49,0,0,0,25.42,7.19H629Z" transform="translate(0.01 0.01)"/><path class="cls-2" d="M975.05,170.86h36.17l-25.8,154.79H949.25L966,225l-37.44,45.37H892.87l-23.7-46.5-17,101.77H816l25.8-154.79H878l37.1,72.57Z" transform="translate(0.01 0.01)"/><path class="cls-2" d="M1138.15,301.39h-75.68q-11.91,0-18.92-8.43t-5-20.33l16.92-101.54H1019.3l-16.93,101.54a47.91,47.91,0,0,0,2.63,26.51,41.7,41.7,0,0,0,16.1,19.32,45.57,45.57,0,0,0,25.42,7.19h77.29Z" transform="translate(0.01 0.01)"/></g></g></svg>

[image diff — new file size: 3.0 KiB]

[image diff — size before: 66 KiB, after: 4.2 KiB]

@@ -1 +1,28 @@
<svg width="1085" height="1014" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" overflow="hidden"><defs><clipPath id="clip0"><rect x="2788" y="393" width="1085" height="1014"/></clipPath><clipPath id="clip1"><rect x="3529" y="767" width="345" height="405"/></clipPath><clipPath id="clip2"><rect x="3529" y="767" width="345" height="405"/></clipPath><clipPath id="clip3"><rect x="3529" y="767" width="345" height="405"/></clipPath><clipPath id="clip4"><rect x="2788" y="767" width="344" height="405"/></clipPath><clipPath id="clip5"><rect x="2788" y="767" width="344" height="405"/></clipPath><clipPath id="clip6"><rect x="2788" y="767" width="344" height="405"/></clipPath></defs><g clip-path="url(#clip0)" transform="translate(-2788 -393)"><path d="M2796 895C2796 617.753 3027.92 393 3314 393 3600.08 393 3832 617.753 3832 895 3832 1172.25 3600.08 1397 3314 1397 3027.92 1397 2796 1172.25 2796 895Z" fill="#FFFFFF" fill-rule="evenodd"/><path d="M3678.58 1259.53C3475.89 1455.39 3147.92 1454.78 2946.01 1258.16 2945.69 1257.84 2945.36 1257.52 2945.04 1257.21L3313 903.513Z" fill="#203864" fill-rule="evenodd"/><path d="M3082 703.5C3082 578.96 3186.32 478 3315 478 3443.68 478 3548 578.96 3548 703.5 3548 828.04 3443.68 929 3315 929 3186.32 929 3082 828.04 3082 703.5Z" fill="#203864" fill-rule="evenodd"/><path d="M3314 572 3680 693 3314 814 2948 693Z" fill="#8FAADC" fill-rule="evenodd"/><path d="M3313 588 3313 803 2969 695.5Z" fill="#2F5597" fill-rule="evenodd"/><g clip-path="url(#clip1)"><g clip-path="url(#clip2)"><g clip-path="url(#clip3)"><path d="M121.479 317.99 160.781 185.792 100.042 185.792 130.126 25.8679 220.02 25.8679 182.219 142.917 242.958 142.917 121.479 317.99Z" stroke="#FFC000" stroke-width="30.87" fill="#FFC000" transform="matrix(1 0 0 1.17493 3530 768)"/></g></g></g><g clip-path="url(#clip4)"><g clip-path="url(#clip5)"><g clip-path="url(#clip6)"><path d="M121.833 318.916 161.25 186.333 100.333 186.333 130.505 25.9433 220.662 25.9433 182.75 143.333 243.666 143.333 121.833 318.916Z" stroke="#FFC000" stroke-width="30.96" fill="#FFC000" transform="matrix(1 0 0 1.17151 2788 768)"/></g></g></g></g></svg>
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 26.0.2, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
viewBox="0 0 348.1 450.7" style="enable-background:new 0 0 348.1 450.7;" xml:space="preserve">
<style type="text/css">
.st0{fill:#FE9807;}
.st1{fill:#505D66;}
</style>
<g id="Layer_2_00000027592924314629136480000002286563024165190831_">
<g id="图层_1">
<path class="st0" d="M298,76.7L173.8,0.2c-0.7-0.5-1.7-0.3-2.2,0.5c-0.4,0.6-0.3,1.3,0.1,1.9L211.6,51
c-11.2,0.6-22.3,1.9-33.4,3.9c-11.9,2.2-23.6,5.2-35,9c-11.3,3.8-22.4,8.5-33,13.9c-10.6,5.5-20.8,11.7-30.5,18.8
c-10.4,7.6-20.1,16.1-29,25.5c-8.6,9.1-16.3,19.1-23,29.7c-6.6,10.6-12.1,21.8-16.3,33.5C7,197.2,4,209.6,2.4,222.1L0,241
c-0.1,0.9,0.5,1.7,1.4,1.8c0.4,0,0.7,0,1-0.2l78.9-47.9c0.3-0.2,0.6-0.5,0.7-0.9l1-3.2c5.7-17.5,14.7-33.7,26.4-47.9
c11.8-14.2,25.8-26.3,41.5-36c16.2-10,33.8-17.7,52.2-22.7c19.3-5.3,39.3-7.8,59.3-7.6c2.9,0,5.8,0.1,8.6,0.3s5.8,0.3,8.6,0.6
s5.8,0.6,8.6,0.9s5.7,0.8,8.5,1.2c0.8,0.2,1.7-0.3,1.9-1.2C298.9,77.8,298.6,77.1,298,76.7z"/>
<path class="st0" d="M347.8,177.8l-1-18.1c0-0.9-0.8-1.5-1.7-1.5c-0.3,0-0.6,0.1-0.8,0.3l-71.2,47.1c-0.3,0.2-0.6,0.6-0.7,1
l-0.7,3.3c-4,18.8-11.6,36.5-22.6,52.3c-11.6,16.7-26,31.3-42.6,43c-17.8,12.7-37.5,22.4-58.3,28.9c-22.1,6.9-45.1,10.3-68.3,9.9
c-3.3,0-6.6-0.2-9.9-0.4s-6.6-0.5-9.8-0.8s-6.5-0.8-9.7-1.2s-6.4-1-9.6-1.7c-0.8-0.2-1.7,0.4-1.9,1.2c-0.1,0.6,0.1,1.2,0.6,1.5
l131.4,107.6c0.3,0.2,0.6,0.4,1,0.4c0.9,0,1.6-0.7,1.6-1.6c0-0.3-0.1-0.5-0.2-0.7l-36.9-70.5c13.6-0.8,27.2-2.6,40.6-5.5
c13.8-3,27.4-7.1,40.5-12.4c12.8-5.1,25.2-11.3,36.9-18.6c11.5-7.1,22.3-15.2,32.3-24.2c10.5-9.4,19.9-20,28.2-31.4
c7.8-10.7,14.3-22.3,19.6-34.4c5-11.7,8.7-23.9,10.9-36.3C347.8,202.7,348.5,190.3,347.8,177.8z"/>
<path class="st1" d="M258.6,209.8l-61.7-28.5l61.4-95.5c0.5-0.8,0.3-1.8-0.5-2.2c-0.5-0.3-1.2-0.3-1.7,0l-154.8,96.3
c-0.8,0.5-1,1.5-0.5,2.2c0.2,0.3,0.4,0.5,0.7,0.6l60.3,29.1l-90.3,122c-0.5,0.6-0.4,1.5,0.1,2.1c0.5,0.6,1.4,0.7,2.1,0.2
l185.2-123.5c0.8-0.5,1-1.5,0.5-2.3C259.1,210.1,258.8,209.9,258.6,209.8L258.6,209.8z"/>
</g>
</g>
</svg>

[image diff — size before: 2.1 KiB, after: 2.2 KiB]

@@ -3923,9 +3923,9 @@ flux@^4.0.1:
    fbjs "^3.0.1"

follow-redirects@^1.0.0, follow-redirects@^1.14.0:
  version "1.14.6"
  resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.14.6.tgz#8cfb281bbc035b3c067d6cd975b0f6ade6e855cd"
  integrity sha512-fhUl5EwSJbbl8AR+uYL2KQDxLkdSjZGR36xy46AO7cOMTrCMON6Sa28FmAnC2tRTDbd/Uuzz3aJBv7EBN7JH8A==
  version "1.14.7"
  resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.14.7.tgz#2004c02eb9436eee9a21446a6477debf17e81685"
  integrity sha512-+hbxoLbFMbRKDwohX8GkTataGqO6Jb7jGwpAlwgy2bIz25XtRm7KEzJM76R1WiNT5SwZkX4Y75SwBolkpmE7iQ==

fork-ts-checker-webpack-plugin@^6.0.5:
  version "6.5.0"
@@ -4671,9 +4671,9 @@ is-ci@^2.0.0:
    ci-info "^2.0.0"

is-core-module@^2.2.0:
  version "2.8.0"
  resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.8.0.tgz#0321336c3d0925e497fd97f5d95cb114a5ccd548"
  integrity sha512-vd15qHsaqrRL7dtH6QNuy0ndJmRDrS9HAM1CAiSifNUFv4x1a0CCVsj18hJ1mShxIG6T2i1sO78MkP56r0nYRw==
  version "2.8.1"
  resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.8.1.tgz#f59fdfca701d5879d0a6b100a40aa1560ce27211"
  integrity sha512-SdNCUs284hr40hFTFP6l0IfZ/RSrMXF3qgoRHd3/79unUTvrFO/JoXwkGm+5J/Oe3E/b5GsnG330uUNgRpu1PA==
  dependencies:
    has "^1.0.3"

@@ -5487,9 +5487,7 @@ node-fetch@2.6.1:
  integrity sha512-V4aYg89jEoVRxRb2fJdAg8FHvI7cEyYdVAh94HH0UIK8oJxUfkjlDQN9RbMx+bEjP7+ggMiFRprSti032Oipxw==

node-forge@^0.10.0:
  version "0.10.0"
  resolved "https://registry.yarnpkg.com/node-forge/-/node-forge-0.10.0.tgz#32dea2afb3e9926f02ee5ce8794902691a676bf3"
  integrity sha512-PPmu8eEeG9saEUvI97fm4OYxXVB6bFvyNTyiUOBichBpFG8A1Ljw3bY62+5oOjDEMHRnd0Y7HQ+x7uzxOzC6JA==
  version "1.0.0"

node-releases@^2.0.1:
  version "2.0.1"
@@ -5806,7 +5804,7 @@ path-key@^3.0.0, path-key@^3.1.0:
  resolved "https://registry.yarnpkg.com/path-key/-/path-key-3.1.1.tgz#581f6ade658cbba65a0d3380de7753295054f375"
  integrity sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==

path-parse@^1.0.6:
path-parse@^1.0.6, path-parse@^1.0.7:
  version "1.0.7"
  resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735"
  integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==
@@ -6774,7 +6772,16 @@ resolve-pathname@^3.0.0:
  resolved "https://registry.yarnpkg.com/resolve-pathname/-/resolve-pathname-3.0.0.tgz#99d02224d3cf263689becbb393bc560313025dcd"
  integrity sha512-C7rARubxI8bXFNB/hqcp/4iUeIXJhJZvFPFPiSPRnhU5UPxzMFIl+2E6yY6c4k9giDJAhtV+enfA+G89N6Csng==

resolve@^1.1.6, resolve@^1.14.2, resolve@^1.3.2:
resolve@^1.1.6:
  version "1.21.0"
  resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.21.0.tgz#b51adc97f3472e6a5cf4444d34bc9d6b9037591f"
  integrity sha512-3wCbTpk5WJlyE4mSOtDLhqQmGFi0/TD9VPwmiolnk8U0wRgMEktqCXd3vy5buTO3tljvalNvKrjHEfrd2WpEKA==
  dependencies:
    is-core-module "^2.8.0"
    path-parse "^1.0.7"
    supports-preserve-symlinks-flag "^1.0.0"

resolve@^1.14.2, resolve@^1.3.2:
  version "1.20.0"
  resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.20.0.tgz#629a013fb3f70755d6f0b7935cc1c2c5378b1975"
  integrity sha512-wENBPt4ySzg4ybFQW2TT1zMQucPK95HSh/nq2CFTZVOGut2+pQvSsgtda4d26YrYcr067wjbmzOG8byDPBX63A==
@@ -7052,10 +7059,9 @@ shell-quote@^1.7.2:
  integrity sha512-Vpfqwm4EnqGdlsBFNmHhxhElJYrdfcxPThu+ryKS5J8L/fhAwLazFZtq+S+TWZ9ANj2piSQLGj6NQg+lKPmxrw==

shelljs@^0.8.4:
  version "0.8.4"
  resolved "https://registry.yarnpkg.com/shelljs/-/shelljs-0.8.4.tgz#de7684feeb767f8716b326078a8a00875890e3c2"
  integrity sha512-7gk3UZ9kOfPLIAbslLzyWeGiEqx9e3rxwZM0KE6EL8GlGwjym9Mrlx5/p33bWTu9YG6vcS4MBxYZDHYr5lr8BQ==
  dependencies:
  version "0.8.5"
  resolved "https://registry.yarnpkg.com/shelljs/-/shelljs-0.8.5.tgz#de055408d8361bed66c669d2f000538ced8ee20c"
  integrity sha512-TiwcRcrkhHvbrZbnRcFYMLl30Dfov3HKqzp5tO5b4pt6G/SezKcYhmDg15zXVBswHmctSAQKznqNW2LO5tTDow==
  dependencies:
    glob "^7.0.0"
    interpret "^1.0.0"
    rechoir "^0.6.2"
@@ -7333,6 +7339,11 @@ supports-color@^8.0.0:
  dependencies:
    has-flag "^4.0.0"

supports-preserve-symlinks-flag@^1.0.0:
  version "1.0.0"
  resolved "https://registry.yarnpkg.com/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz#6eda4bd344a3c94aea376d4cc31bc77311039e09"
  integrity sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==

svg-parser@^2.0.2:
  version "2.0.4"
  resolved "https://registry.yarnpkg.com/svg-parser/-/svg-parser-2.0.4.tgz#fdc2e29e13951736140b76cb122c8ee6630eb6b5"