Source code for syne_tune.optimizer.schedulers.searchers.bayesopt.sklearn.predictor

# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from typing import Tuple, Dict

import numpy as np


class SKLearnPredictor:
    """
    Base class for predictors generated by scikit-learn based estimators of
    :class:`~syne_tune.optimizer.schedulers.searchers.bayesopt.sklearn.estimator.SKLearnEstimator`.

    This is only for predictors which return means and stddevs in
    :meth:`predict`.
    """
    def predict(self, X: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """
        Returns signals which are statistics of the predictive distribution at
        input points ``X``.

        :param X: Input points, shape ``(n, d)``
        :return: ``(means, stds)``, where predictive means ``means`` and
            predictive stddevs ``stds`` have shape ``(n,)``
        """
        raise NotImplementedError
    def backward_gradient(
        self, input: np.ndarray, head_gradients: Dict[str, np.ndarray]
    ) -> np.ndarray:
        r"""
        Needs to be implemented only if gradient-based local optimization of an
        acquisition function is supported. Computes the gradient
        :math:`\nabla f(x)` for an acquisition function :math:`f(x)`, where
        :math:`x` is a single input point. This uses reverse mode
        differentiation: the head gradients are passed by the acquisition
        function. The head gradients are :math:`\partial_k f`, where :math:`k`
        runs over the statistics returned by :meth:`predict` for the single
        input point :math:`x`. The shape of the head gradients is the same as
        the shape of the statistics.

        :param input: Single input point :math:`x`, shape ``(d,)``
        :param head_gradients: See above
        :return: Gradient :math:`\nabla f(x)`
        """
        raise NotImplementedError
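

# ---------------------------------------------------------------------------
# Example (not part of Syne Tune): a minimal sketch of a concrete predictor
# wrapping an already fitted ``sklearn.gaussian_process.GaussianProcessRegressor``.
# The class name ``GPSKLearnPredictor``, the ``"mean"`` / ``"std"`` keys in
# ``head_gradients``, and the finite-difference gradient below are assumptions
# made for illustration; a real implementation would use exact gradients.
# ---------------------------------------------------------------------------

from sklearn.gaussian_process import GaussianProcessRegressor


class GPSKLearnPredictor(SKLearnPredictor):
    def __init__(self, regressor: GaussianProcessRegressor):
        # ``regressor`` is assumed to be fitted already
        self._regressor = regressor

    def predict(self, X: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        # ``return_std=True`` makes the GP return predictive stddevs as well
        means, stds = self._regressor.predict(X, return_std=True)
        return means, stds

    def backward_gradient(
        self, input: np.ndarray, head_gradients: Dict[str, np.ndarray]
    ) -> np.ndarray:
        # Chain rule: grad f(x) = (df/dmean) * grad mean(x)
        #                       + (df/dstd)  * grad std(x),
        # approximated here with forward finite differences (illustration only)
        eps = 1e-6
        mean0, std0 = self.predict(input.reshape(1, -1))
        grad = np.zeros_like(input, dtype=float)
        for k in range(input.shape[0]):
            x_plus = input.copy().astype(float)
            x_plus[k] += eps
            mean1, std1 = self.predict(x_plus.reshape(1, -1))
            grad[k] = (
                head_gradients["mean"][0] * (mean1[0] - mean0[0]) / eps
                + head_gradients["std"][0] * (std1[0] - std0[0]) / eps
            )
        return grad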