Code in benchmarking/examples/launch_local:
comparison of baseline methods on a real benchmark, using the
LocalBackend.
from syne_tune.experiments.default_baselines import (
RandomSearch,
BayesianOptimization,
ASHA,
MOBSTER,
)
class Methods:
    """
    Names of the baseline HPO methods compared in this example.

    Each constant is both the display name of a method and the key under
    which its factory is registered in the ``methods`` dict below.
    """

    RS = "RS"
    BO = "BO"
    ASHA = "ASHA"
    MOBSTER = "MOBSTER"
def _random_search(method_arguments):
    """Factory for the random-search baseline."""
    return RandomSearch(method_arguments)


def _bayesian_optimization(method_arguments):
    """Factory for the Bayesian-optimization baseline."""
    return BayesianOptimization(method_arguments)


def _asha(method_arguments):
    """Factory for ASHA, using the promotion variant."""
    return ASHA(method_arguments, type="promotion")


def _mobster(method_arguments):
    """Factory for MOBSTER, using the promotion variant."""
    return MOBSTER(method_arguments, type="promotion")


# Maps method name to a factory: called with ``method_arguments`` to
# construct the configured baseline scheduler.
methods = {
    Methods.RS: _random_search,
    Methods.BO: _bayesian_optimization,
    Methods.ASHA: _asha,
    Methods.MOBSTER: _mobster,
}
from benchmarking.examples.launch_local.baselines import methods
from benchmarking.benchmark_definitions import (
real_benchmark_definitions as benchmark_definitions,
)
from syne_tune.experiments.launchers.hpo_main_local import main
if __name__ == "__main__":
    # Entry point: run the experiments for all baselines in ``methods``
    # over all benchmarks in ``benchmark_definitions``, using the local
    # backend (see syne_tune.experiments.launchers.hpo_main_local.main).
    main(methods, benchmark_definitions)
from pathlib import Path
import benchmarking
from benchmarking.benchmark_definitions import (
real_benchmark_definitions as benchmark_definitions,
)
from benchmarking.examples.launch_local.baselines import methods
from syne_tune.experiments.launchers.launch_remote_local import launch_remote
if __name__ == "__main__":
    # Script executed remotely for each experiment; it lives next to this
    # launcher in the same example directory.
    entry_point = Path(__file__).parent / "hpo_main.py"
    launch_remote(
        entry_point=entry_point,
        methods=methods,
        benchmark_definitions=benchmark_definitions,
        # Ship the local ``benchmarking`` package along with the remote
        # job so its imports resolve there as well.
        source_dependencies=benchmarking.__path__,
    )
syne-tune[gpsearchers,aws]
tqdm