#60 dev

Merged
isleizhang merged 3 commits from mathcoder/XBBO:dev into dev 1 year ago
  1. +0
    -139
      examples/demo.py
  2. +0
    -123
      xbbo/pipeline/bbo.py
  3. +0
    -134
      xbbo/pipeline/pbt.py
  4. +0
    -135
      xbbo/pipeline/transfer_bbo.py

+ 0
- 139
examples/demo.py View File

@@ -1,139 +0,0 @@
import glob

import numpy as np
import random

from xbbo.pipeline.bbo import BBO
from xbbo.pipeline.pbt import PBT
from xbbo.pipeline.transfer_bbo import Transfer_BBO
from xbbo.utils.config import cfg, load_cfg_fom_args
from xbbo.core.constants import MAXINT

# cfg.freeze()


def experiment_main(cfg_clone):  # pragma: main
    """Dispatch one experiment according to ``cfg_clone.GENERAL.pipeline``.

    Each pipeline is repeated ``cfg_clone.repeat_num`` times. For the 'BBO'
    pipeline a fresh seed is drawn per repeat from a master RandomState
    seeded with ``cfg_clone.GENERAL.random_seed`` so every repeat differs
    but the whole experiment is reproducible.

    Args:
        cfg_clone: a loaded (cloned) configuration node.

    Raises:
        NotImplementedError: if the pipeline name is not recognized.
    """
    SEED = cfg_clone.GENERAL.random_seed
    rng = np.random.RandomState(SEED)
    if cfg_clone.GENERAL.pipeline == 'BBO':
        for r in range(cfg_clone.repeat_num):
            # Independent seed per repeat, derived from the master RNG.
            seed = rng.randint(MAXINT)
            bbo = BBO(cfg_clone, seed)
            bbo.run()
            bbo.record.save_to_file(r)
            print(bbo.record)
    elif cfg_clone.GENERAL.pipeline == 'NAS':
        for r in range(cfg_clone.repeat_num):
            # NOTE(review): ``NAS`` is not imported in this module, so this
            # branch raises NameError when taken — confirm intended import.
            nas = NAS(cfg_clone)
            nas.run()
            nas.record.save_to_file(r)
            print(nas.record)
    elif cfg_clone.GENERAL.pipeline == 'transfer_bbo':
        for r in range(cfg_clone.repeat_num):
            bbo = Transfer_BBO(cfg_clone)
            bbo.run()
            bbo.record.save_to_file(r)
            print(bbo.record)
    elif cfg_clone.GENERAL.pipeline == 'PBT':
        for r in range(cfg_clone.repeat_num):
            pbt = PBT(cfg_clone)
            scores = pbt.run()
            pbt.show_res(scores)
            print(pbt)
    else:
        raise NotImplementedError


def main(cfg_clone):
    """Run the experiment described by ``cfg_clone`` (thin wrapper over ``experiment_main``)."""
    # load_cfg_fom_args()

    experiment_main(cfg_clone)


if __name__ == '__main__':
    # Clone the global config template, freeze the template so nothing can
    # mutate it while the experiment runs, populate the clone from the
    # command line, run, then unfreeze the template again.
    cfg_clone = cfg.clone()
    cfg.freeze()
    load_cfg_fom_args(cfg_clone)  # e.g. ['-c', './cfgs/<name>.yaml', '-r', '3'] repeats 3 times with different seeds
    main(cfg_clone)
    cfg.defrost()

+ 0
- 123
xbbo/pipeline/bbo.py View File

@@ -1,123 +0,0 @@
import numpy as np
from time import time
import tqdm

from xbbo.problem import problem_register
from xbbo.search_algorithm import alg_register
from xbbo.configspace import build_space
from xbbo.core.constants import MAXINT, Key
from xbbo.utils.record import Record


class BBO:
    """Single-task black-box optimization pipeline.

    Wires a registered test problem to a registered search algorithm and
    runs the standard suggest -> evaluate -> observe loop, recording each
    round's suggestions, evaluations and timings.
    """

    def __init__(self, cfg, seed):
        """Build the problem, search space and optimizer from ``cfg``.

        All sub-seeds (problem, space, optimizer) are drawn from a single
        RandomState so one ``seed`` value reproduces the whole run.

        Args:
            cfg: configuration node (TEST_PROBLEM / OPTM / GENERAL sections).
            seed: master random seed for this run.
        """
        self.cfg = cfg
        self.rng = np.random.RandomState(seed)
        self.function_instance = problem_register[cfg.TEST_PROBLEM.name](
            seed=self.rng.randint(MAXINT), **cfg.TEST_PROBLEM.kwargs)

        # Hyper-parameters to optimize, as declared by the problem.
        self.api_config = self.function_instance.get_api_config()
        self.config_spaces = build_space(self.api_config, seed=self.rng.randint(MAXINT))

        # Set up the optimizer.
        opt_class = alg_register[cfg.OPTM.name]
        self.optimizer_instance = opt_class(
            self.config_spaces,
            suggest_limit=cfg.OPTM.max_call,
            seed=self.rng.randint(MAXINT),
            **dict(cfg.OPTM.kwargs))

        self.n_suggestions = cfg.OPTM.n_suggestions
        self.n_obj = cfg.OPTM.n_obj

        assert self.n_suggestions >= 1, "batch size must be at least 1"
        assert self.n_obj >= 1, "Must be at least one objective"

        self.n_calls = cfg.OPTM.max_call
        self.record = Record(self.cfg.GENERAL.exp_dir)

    def evaluate(self, param):
        """Evaluate one configuration dict on the underlying problem."""
        return self.function_instance.evaluate(param)

    def run(self):
        """Run the suggest/evaluate/observe loop for ``self.n_calls`` rounds."""
        pbar = tqdm.tqdm(range(self.n_calls))
        pbar.set_description(f"Optimizer {self.cfg.OPTM.name} is running:")
        for ii in pbar:
            tt = time()
            trial_list = self.optimizer_instance.suggest(self.n_suggestions)
            suggest_time = time() - tt

            assert len(trial_list) == self.n_suggestions, "invalid number of suggestions provided by the optimizer"

            function_evals = []
            for trial in trial_list:
                tt = time()
                f_current_eval = self.evaluate(trial.config_dict)
                eval_time = time() - tt
                # Attach the observation (and its wall time) to the trial.
                trial.add_observe_value(observe_value=f_current_eval,
                                        obs_info={Key.EVAL_TIME: eval_time})
                function_evals.append(f_current_eval)

            tt = time()
            self.optimizer_instance.observe(trial_list)
            observe_time = time() - tt

            timing = {
                'suggest_time_per_suggest': suggest_time,
                'observe_time_per_suggest': observe_time,
                'eval_time_per_suggest': sum(trial.time for trial in trial_list)
            }
            # Prefer the sparse encoding when available, else the dense one.
            self.record.append(
                [trial.dense_array if trial.sparse_array is None else trial.sparse_array
                 for trial in trial_list],
                function_evals,
                timing=timing,
                suggest_point=[trial.config_dict for trial in trial_list])
            print(function_evals)

        print(self.optimizer_instance.trials.best_observe_value)


class BBO_REBAR(BBO):
    """Placeholder REBAR variant of :class:`BBO`; ``run`` is not implemented yet."""

    def __init__(self, cfg, seed):
        # BUGFIX: the original called ``BBO.__init__(self)`` without the
        # required (cfg, seed) arguments, so construction always raised
        # TypeError. Forward them to the base class instead.
        BBO.__init__(self, cfg, seed)

    def evaluate(self, params):
        """Delegate evaluation to the base class."""
        return BBO.evaluate(self, params)

    def run(self):
        # Intentionally a no-op placeholder.
        pass

+ 0
- 134
xbbo/pipeline/pbt.py View File

@@ -1,134 +0,0 @@
import numpy as np
from time import time
import tqdm
from matplotlib import pyplot as plt

from xbbo.problem import problem_register
from xbbo.search_algorithm import alg_register
from xbbo.configspace import build_space


class PBT:
    """Population Based Training pipeline.

    Trains a population of models in lock-step and, every ``interval``
    epochs, lets the optimizer exploit/explore hyper-parameters across
    the population.
    """

    def __init__(self, cfg):
        """Build the population, search space and PBT optimizer from ``cfg``."""
        self.cfg = cfg
        self.pop_size = cfg.OPTM.pop_size

        # One model instance per population member.
        self.population_model = [
            problem_register[cfg.TEST_PROBLEM.name](cfg) for _ in range(self.pop_size)
        ]
        # Hyper-parameters to optimize, as declared by the problem.
        self.api_config = self.population_model[0].get_api_config()
        self.config_spaces = build_space(self.api_config)

        # Set up the optimizer.
        opt_class = alg_register[cfg.OPTM.name]
        self.optimizer_instance = opt_class(self.config_spaces, self.pop_size,
                                            **dict(cfg.OPTM.kwargs))
        self.n_suggestions = cfg.OPTM.n_suggestions
        self.n_obj = cfg.OPTM.n_obj

        assert self.n_suggestions >= 1, "batch size must be at least 1"
        assert self.n_obj >= 1, "Must be at least one objective"

        self.epoch = cfg.OPTM.epoch        # total training epochs per member
        self.interval = cfg.OPTM.interval  # epochs between exploit/explore steps

    def evaluate(self, population_model_history_hp):
        """Replay a hyper-parameter schedule on the first population model.

        NOTE(review): this only steps the model and returns nothing —
        confirm whether a score/return value was intended.
        """
        model = self.population_model[0]
        for step_num, params, acc in population_model_history_hp:
            model.update_hp(params)
            for i in range(step_num):
                model.step()

    def run(self):
        """Train until every member reaches ``epoch`` epochs; return final scores."""
        self.optimizer_instance.init_model_hp(self.population_model)
        finished = False
        with tqdm.tqdm(total=int(len(self.population_model[-1]) * self.epoch)) as pbar:
            while not finished:
                # Advance every member by ``interval`` epochs worth of steps.
                for i in range(self.pop_size):
                    self.population_model[i].step(int(self.interval * len(self.population_model[i])))
                    if self.population_model[i].step_num == int(len(self.population_model[i]) * self.epoch):
                        finished = True
                # Synchronization point: score all members before exploiting.
                for i in range(self.pop_size):
                    self.population_model[i].evaluate()
                scores = [net.score for net in self.population_model]
                pbar.update(self.interval * len(self.population_model[-1]))
                if finished:
                    break
                self.optimizer_instance.exploit_and_explore(self.population_model, scores)
        return scores

    def show_res(self, scores):
        """Plot per-member score curves and the learning-rate schedules (MNIST demo)."""
        best_individual_index = np.argmax(scores)
        fig, (ax1, ax2) = plt.subplots(1, 2)
        for i in range(self.pop_size):
            desc_data = np.array(self.population_model[i].history_score)
            desc_data[:, 0] /= len(self.population_model[-1])  # steps -> epochs
            ax1.plot(desc_data[:, 0], desc_data[:, 1], alpha=0.5)
            ax1.set_xlabel("epoch")
            ax1.set_ylabel("score")
        for i in range(self.pop_size):
            desc_data = np.array([[x[0], x[-1]['lr']] for x in self.population_model[i].history_hp])
            desc_data[:, 0] /= len(self.population_model[-1])  # steps -> epochs
            # Extend the last lr value to the final epoch for a step plot.
            desc_data = np.append(desc_data, [[self.epoch, desc_data[-1, 1]]], axis=0)
            ax2.plot(desc_data[:, 0], desc_data[:, 1], label='best individual' if i == best_individual_index else None)
            ax2.set_xlabel("epoch")
            ax2.set_ylabel("lr")
        plt.legend()
        plt.suptitle("PBT search (lr, momentum) in MNIST")
        plt.tight_layout()
        plt.savefig('./out/PBT_mnist.png')
        plt.show()

        print('-----\nBest hyper-param strategy: {}'.format(self.population_model[best_individual_index].history_hp))
        print('final score: {}'.format(self.population_model[best_individual_index].history_score[-1]))

    def show_toy_res(self, scores):
        """Plot per-member score curves and theta trajectories (toy demo)."""
        best_individual_index = np.argmax(scores)
        fig, (ax1, ax2) = plt.subplots(1, 2)
        for i in range(self.pop_size):
            desc_data = np.array(self.population_model[i].history_score)
            desc_data[:, 0] /= len(self.population_model[-1])  # steps -> epochs
            ax1.plot(desc_data[:, 0], desc_data[:, 1], alpha=0.5)
            ax1.set_xlabel("epoch")
            ax1.set_ylabel("score")
        for i in range(self.pop_size):
            desc_data = np.array(self.population_model[i].trajectory_theta)
            ax2.scatter(desc_data[:, 0], desc_data[:, 1], s=2, alpha=0.5)
            ax2.set_xlim(0, 1)
            ax2.set_ylim(0, 1)
            ax2.set_xlabel(r"$\theta_1$")
            ax2.set_ylabel(r"$\theta_2$")
        plt.suptitle("PBT toy example")
        plt.tight_layout()
        plt.savefig('./out/PBT_toy.png')
        plt.show()

        print('-----\nBest hyper-param strategy: {}'.format(self.population_model[best_individual_index].history_hp))
        print('final score: {}'.format(self.population_model[best_individual_index].history_score[-1]))

+ 0
- 135
xbbo/pipeline/transfer_bbo.py View File

@@ -1,135 +0,0 @@
import numpy as np
import json
from time import time
import tqdm

from xbbo.problem import problem_register
from xbbo.search_algorithm import alg_register
from xbbo.configspace import build_space
from xbbo.utils.record import Record


class Transfer_BBO:
    """Transfer-learning black-box optimization pipeline.

    Like :class:`BBO`, but the optimizer is primed with observations from
    previous (source) tasks via ``prepare`` before the standard
    suggest -> evaluate -> observe loop starts.
    """

    def __init__(self, cfg):
        """Build the problem, search space and transfer optimizer from ``cfg``."""
        self.cfg = cfg
        self.function_instance = problem_register[cfg.TEST_PROBLEM.name](cfg)

        # Hyper-parameters to optimize, as declared by the problem.
        self.api_config = self.function_instance.get_api_config()
        self.config_spaces = build_space(self.api_config)

        # Set up the optimizer.
        opt_class = alg_register[cfg.OPTM.name]
        self.optimizer_instance = opt_class(self.config_spaces, **dict(cfg.OPTM.kwargs))

        # Transfer step: feed source-task observations to the optimizer.
        old_D_x_params, old_D_y, new_D_x_param = self.function_instance.array_to_config(ret_param=True)
        self.optimizer_instance.prepare(old_D_x_params, old_D_y, new_D_x_param,
                                        np.argsort(list(self.api_config.keys())), params=True)

        self.n_suggestions = cfg.OPTM.n_suggestions
        self.n_obj = cfg.OPTM.n_obj

        assert self.n_suggestions >= 1, "batch size must be at least 1"
        assert self.n_obj >= 1, "Must be at least one objective"

        self.n_calls = cfg.OPTM.max_call
        # NOTE(review): BBO passes ``cfg.GENERAL.exp_dir`` to Record while this
        # class passes the whole cfg — confirm which signature Record expects.
        self.record = Record(self.cfg)

    def evaluate(self, param):
        """Evaluate one configuration on the target problem.

        Returns a ``(function_eval, loss)`` pair (see ``run``).
        """
        return self.function_instance.evaluate(param)

    def run(self):
        """Run the suggest/evaluate/observe loop for ``self.n_calls`` rounds."""
        pbar = tqdm.tqdm(range(self.n_calls))
        pbar.set_description(f"Optimizer {self.cfg.OPTM.name} is running:")
        for ii in pbar:
            tt = time()
            next_points, features = self.optimizer_instance.suggest(self.n_suggestions)
            suggest_time = time() - tt

            assert len(next_points) == self.n_suggestions, "invalid number of suggestions provided by the optimizer"

            function_evals = []
            losses = []
            tt = time()
            for next_point in next_points:
                f_current_eval, loss = self.evaluate(next_point)
                function_evals.append(f_current_eval)
                losses.append(loss)
            eval_time = time() - tt  # wall time for the whole batch

            # Only single-objective observation is supported here.
            eval_list = np.asarray(losses)[:, :self.cfg.OPTM.n_obj].ravel().tolist()
            assert self.cfg.OPTM.n_obj == 1
            tt = time()
            self.optimizer_instance.observe(features, eval_list)
            observe_time = time() - tt

            timing = {
                'suggest_time_per_suggest': suggest_time,
                'observe_time_per_suggest': observe_time,
                'eval_time_per_suggest': eval_time
            }
            self.record.append(features, losses, function_evals, timing=timing, suggest_point=next_points)



Loading…
Cancel
Save