This commit is contained in:
Anonymous-submission-repo
2022-10-15 03:42:23 +00:00
parent 10d36dcc7b
commit 44883f7463
5 changed files with 50 additions and 49 deletions

View File

@@ -3689,4 +3689,5 @@ class AutoML(BaseEstimator):
if inv[i]:
q += inv[i] / s
if p < q:
return estimator_list[i]
return estimator_list[i]

View File

@@ -114,8 +114,8 @@ class BlendSearch(Searcher):
on the nature of the resource budget). When cost_attr is set to None, cost differences between different trials will be omitted
in our search algorithm.
lexico_objectives: dict, default=None | It specifies information needed to perform multi-objective
optimization with lexicographic preferences. This is only supported in CFO.
When lexico_objectives it not None, the arguments metric, mode will be invalid.
optimization with lexicographic preferences. This is only supported in CFO currently.
When lexico_objectives is not None, the arguments metric, mode will be invalid.
This dictionary shall contain the following fields of key-value pairs:
- "metrics": a list of optimization objectives with the orders reflecting the priorities/preferences of the
objectives.

View File

@@ -123,9 +123,9 @@ class FLOW2(Searcher):
self.cost_attr = cost_attr
self.max_resource = max_resource
self._resource = None
self._f_best = None
self._f_best = None # only used for lexico_compare. It represents the best value achieved by lexico_flow.
self._step_lb = np.Inf
self._histories = None
self._histories = None # only used for lexico_compare. It records the results of historical configurations.
if space is not None:
self._init_search()
@@ -345,41 +345,41 @@ class FLOW2(Searcher):
self._init_search()
return True
def lexico_compare(self, result) -> bool:
def update_fbest():
obj_initial = self.lexico_objectives["metrics"][0]
feasible_index = [*range(len(self._histories[obj_initial]))]
for k_metric in self.lexico_objectives["metrics"]:
k_values = np.array(self._histories[k_metric])
self._f_best[k_metric] = np.min(k_values.take(feasible_index))
feasible_index_prior = np.where(
k_values
<= max(
[
self._f_best[k_metric]
+ self.lexico_objectives["tolerances"][k_metric],
self.lexico_objectives["targets"][k_metric],
]
)
)[0].tolist()
feasible_index = [
val for val in feasible_index if val in feasible_index_prior
]
def update_fbest(self,):
obj_initial = self.lexico_objectives["metrics"][0]
feasible_index = [*range(len(self._histories[obj_initial]))]
for k_metric in self.lexico_objectives["metrics"]:
k_values = np.array(self._histories[k_metric])
self._f_best[k_metric] = np.min(k_values.take(feasible_index))
feasible_index_prior = np.where(
k_values
<= max(
[
self._f_best[k_metric]
+ self.lexico_objectives["tolerances"][k_metric],
self.lexico_objectives["targets"][k_metric],
]
)
)[0].tolist()
feasible_index = [
val for val in feasible_index if val in feasible_index_prior
]
def lexico_compare(self, result) -> bool:
if self._histories is None:
self._histories, self._f_best = defaultdict(list), {}
for k in self.lexico_objectives["metrics"]:
self._histories[k].append(result[k])
update_fbest()
self.update_fbest()
return True
else:
for k in self.lexico_objectives["metrics"]:
self._histories[k].append(result[k])
update_fbest()
self.update_fbest()
for k_metric, k_mode in zip(self.lexico_objectives["metrics"],self.lexico_objectives["modes"]):
k_c = self.lexico_objectives["targets"][k_metric] if k_mode == "min" else -1*self.lexico_objectives["targets"][k_metric]
if (result[k_metric] < max([self._f_best[k_metric] + self.lexico_objectives["tolerances"][k_metric], k_c])) and (
self.best_obj[k_metric] < max([self._f_best[k_metric] + self.lexico_objectives["tolerances"][k_metric], k_c])
k_target = self.lexico_objectives["targets"][k_metric] if k_mode == "min" else -1*self.lexico_objectives["targets"][k_metric]
if (result[k_metric] < max([self._f_best[k_metric] + self.lexico_objectives["tolerances"][k_metric], k_target])) and (
self.best_obj[k_metric] < max([self._f_best[k_metric] + self.lexico_objectives["tolerances"][k_metric], k_target])
):
continue
elif result[k_metric] < self.best_obj[k_metric]:

View File

@@ -80,7 +80,7 @@ class ExperimentAnalysis(EA):
feasible_index = [*range(len(histories[obj_initial]))]
for k_metric, k_mode in zip(self.lexico_objectives["metrics"],self.lexico_objectives["modes"]):
k_values = np.array(histories[k_metric])
k_c = self.lexico_objectives["targets"][k_metric] * -1 if k_mode == "max" else self.lexico_objectives["targets"][k_metric]
k_target = self.lexico_objectives["targets"][k_metric] * -1 if k_mode == "max" else self.lexico_objectives["targets"][k_metric]
f_best[k_metric] = np.min(k_values.take(feasible_index))
feasible_index_prior = np.where(
k_values
@@ -88,7 +88,7 @@ class ExperimentAnalysis(EA):
[
f_best[k_metric]
+ self.lexico_objectives["tolerances"][k_metric],
k_c,
k_target,
]
)
)[0].tolist()
@@ -376,7 +376,7 @@ def run(
a trial before the tuning is terminated.
use_ray: A boolean of whether to use ray as the backend.
lexico_objectives: dict, default=None | It specifies information needed to perform multi-objective
optimization with lexicographic preferences. When lexico_objectives it not None, the arguments metric,
optimization with lexicographic preferences. When lexico_objectives is not None, the arguments metric,
mode, will be invalid, and flaml's tune uses CFO
as the `search_alg`, which makes the input (if provided) `search_alg` invalid.
This dictionary shall contain the following fields of key-value pairs:
@@ -450,6 +450,7 @@ def run(
from .searcher.blendsearch import BlendSearch, CFO
if lexico_objectives != None:
logger.warning("If lexico_objectives is not None, search_alg is forced to be CFO")
search_alg = None
if search_alg is None:
flaml_scheduler_resource_attr = (
@@ -465,24 +466,24 @@ def run(
flaml_scheduler_max_resource = max_resource
flaml_scheduler_reduction_factor = reduction_factor
scheduler = None
try:
import optuna as _
if lexico_objectives is None:
SearchAlgorithm = BlendSearch
else:
if lexico_objectives is None:
try:
import optuna as _
SearchAlgorithm = BlendSearch
logger.info(
"Using search algorithm {}.".format(SearchAlgorithm.__class__.__name__)
)
except ImportError:
SearchAlgorithm = CFO
logger.warning(
"Using CFO for search. To use BlendSearch, run: pip install flaml[blendsearch]"
)
metric = metric or DEFAULT_METRIC
else:
SearchAlgorithm = CFO
logger.info(
"Using search algorithm {}.".format(SearchAlgorithm.__class__.__name__)
)
except ImportError:
SearchAlgorithm = CFO
logger.warning(
"Using CFO for search. To use BlendSearch, run: pip install flaml[blendsearch]"
)
if lexico_objectives is None:
metric = metric or DEFAULT_METRIC
else:
metric = lexico_objectives["metrics"][0] or DEFAULT_METRIC
search_alg = SearchAlgorithm(
metric=metric,

View File

@@ -523,7 +523,6 @@ We support tuning multiple objectives with lexicographic preference by providing
- `tolerances`: (optional) a dictionary to specify the optimality tolerances on objectives. The keys are the metric names (provided in "metrics"), and the values are the numerical tolerance values.
- `targets`: (optional) a dictionary to specify the optimization targets on the objectives. The keys are the metric names (provided in "metrics"), and the values are the numerical target values.
When lexico_objectives is not None, the arguments metric, mode, will be invalid, and flaml's tune uses CFO as the `search_alg`, which makes the input (if provided) `search_alg` invalid.
In the following example, we want to minimize `val_loss` and `pred_time` of the model where `val_loss` has high priority. The tolerances for `val_loss` and `pred_time` are 0.02 and 0 respectively. As we do not have real optimization targets for these two objectives, we set the targets to -inf for both of them.
```python
@@ -536,7 +535,7 @@ lexico_objectives["targets"] = {"val_loss": -float('inf'), "pred_time": -float('
# provide the lexico_objectives to tune.run
tune.run(..., search_alg = None, lexico_objectives=lexico_objectives, ...)
```
*Please note that this is a new feature in version 1.1.0 and subject to change in the future version*
*NOTE: 1. When lexico_objectives is not None, the arguments metric, mode, will be invalid, and flaml's tune uses CFO as the `search_alg`, which makes the input (if provided) `search_alg` invalid. 2. This is a new feature in version 1.1.0 and subject to change in future versions.*
## Hyperparameter Optimization Algorithm