Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

I suspect bayes-optim is causing failures?? #1365

Open
microprediction opened this issue Feb 23, 2022 · 1 comment
Open

I suspect bayes-optim is causing failures?? #1365

microprediction opened this issue Feb 23, 2022 · 1 comment

Comments

@microprediction
Copy link

I notice
bayes-optim==0.2.5.5
and also that this package is causing failures in my tests

I will take a closer look and try to make a more useful comment.

@microprediction
Copy link
Author

Trace:

humpday/optimizers/bayesoptcube.py:35:


self = <bayes_opt.bayesian_optimization.BayesianOptimization object at 0x7f6c4aef78b0>
init_points = 5, n_iter = 5, acq = 'ucb', kappa = 2.576, kappa_decay = 1
kappa_decay_delay = 0, xi = 0.0, gp_params = {}
util = <bayes_opt.util.UtilityFunction object at 0x7f6c4aed3550>, iteration = 0
x_probe = array([0.39676747, 0.53881673])

def maximize(self,
             init_points=5,
             n_iter=25,
             acq='ucb',
             kappa=2.576,
             kappa_decay=1,
             kappa_decay_delay=0,
             xi=0.0,
             **gp_params):
    """Mazimize your function"""
    self._prime_subscriptions()
    self.dispatch(Events.OPTIMIZATION_START)
    self._prime_queue(init_points)
    self.set_gp_params(**gp_params)

    util = UtilityFunction(kind=acq,
                           kappa=kappa,
                           xi=xi,
                           kappa_decay=kappa_decay,
                           kappa_decay_delay=kappa_decay_delay)
    iteration = 0
    while not self._queue.empty or iteration < n_iter:
        try:
            x_probe = next(self._queue)
        except StopIteration:
            util.update_params()
          x_probe = self.suggest(util)

/opt/hostedtoolcache/Python/3.8.12/x64/lib/python3.8/site-packages/bayes_opt/bayesian_optimization.py:182:


self = <bayes_opt.bayesian_optimization.BayesianOptimization object at 0x7f6c4aef78b0>
utility_function = <bayes_opt.util.UtilityFunction object at 0x7f6c4aed3550>

def suggest(self, utility_function):
    """Most promissing point to probe next"""
    if len(self._space) == 0:
        return self._space.array_to_params(self._space.random_sample())

    # Sklearn's GP throws a large number of warnings at times, but
    # we don't really need to see them here.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        self._gp.fit(self._space.params, self._space.target)

    # Finding argmax of the acquisition function.
  suggestion = acq_max(
        ac=utility_function.utility,
        gp=self._gp,
        y_max=self._space.target.max(),
        bounds=self._space.bounds,
        random_state=self._random_state
    )

/opt/hostedtoolcache/Python/3.8.12/x64/lib/python3.8/site-packages/bayes_opt/bayesian_optimization.py:131:


ac = <bound method UtilityFunction.utility of <bayes_opt.util.UtilityFunction object at 0x7f6c4aed3550>>
gp = GaussianProcessRegressor(alpha=1e-06, kernel=Matern(length_scale=1, nu=2.5),
n_restarts_optimizer=5, normalize_y=True,
random_state=RandomState(MT19937) at 0x7F6C4AF14440)
y_max = -0.07236198237438515, bounds = array([[0., 1.],
[0., 1.]])
random_state = RandomState(MT19937) at 0x7F6C4AF14440, n_warmup = 10000
n_iter = 10

def acq_max(ac, gp, y_max, bounds, random_state, n_warmup=10000, n_iter=10):
    """
    A function to find the maximum of the acquisition function

    It uses a combination of random sampling (cheap) and the 'L-BFGS-B'
    optimization method. First by sampling `n_warmup` (1e5) points at random,
    and then running L-BFGS-B from `n_iter` (250) random starting points.

    Parameters
    ----------
    :param ac:
        The acquisition function object that return its point-wise value.

    :param gp:
        A gaussian process fitted to the relevant data.

    :param y_max:
        The current maximum known value of the target function.

    :param bounds:
        The variables bounds to limit the search of the acq max.

    :param random_state:
        instance of np.RandomState random number generator

    :param n_warmup:
        number of times to randomly sample the aquisition function

    :param n_iter:
        number of times to run scipy.minimize

    Returns
    -------
    :return: x_max, The arg max of the acquisition function.
    """

    # Warm up with random points
    x_tries = random_state.uniform(bounds[:, 0], bounds[:, 1],
                                   size=(n_warmup, bounds.shape[0]))
    ys = ac(x_tries, gp=gp, y_max=y_max)
    x_max = x_tries[ys.argmax()]
    max_acq = ys.max()

    # Explore the parameter space more throughly
    x_seeds = random_state.uniform(bounds[:, 0], bounds[:, 1],
                                   size=(n_iter, bounds.shape[0]))
    for x_try in x_seeds:
        # Find the minimum of minus the acquisition function
        res = minimize(lambda x: -ac(x.reshape(1, -1), gp=gp, y_max=y_max),
                       x_try.reshape(1, -1),
                       bounds=bounds,
                       method="L-BFGS-B")

        # See if success
        if not res.success:
            continue

        # Store it if better than previous minimum(maximum).
      if max_acq is None or -res.fun[0] >= max_acq:

E TypeError: 'float' object is not subscriptable

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
None yet
Projects
None yet
Development

No branches or pull requests

1 participant