Code in benchmarking/examples/launch_sagemaker

Comparison of baseline methods on real benchmarks, using the SageMakerBackend. baselines.py defines the methods to compare, hpo_main.py is the script run for each experiment, launch_remote.py launches the experiments as SageMaker training jobs, and requirements.txt lists the dependencies they require.

benchmarking/examples/launch_sagemaker/baselines.py
from syne_tune.experiments.default_baselines import (
    RandomSearch,
    BayesianOptimization,
    ASHA,
    MOBSTER,
)


class Methods:
    RS = "RS"
    BO = "BO"
    ASHA = "ASHA"
    MOBSTER = "MOBSTER"


# Each entry maps a method name to a factory; the launcher calls the factory
# with the experiment-specific arguments (benchmark, random seed, etc.) to
# construct the scheduler
methods = {
    Methods.RS: lambda method_arguments: RandomSearch(method_arguments),
    Methods.BO: lambda method_arguments: BayesianOptimization(method_arguments),
    # The multi-fidelity methods use promotion-based (pause-and-resume) scheduling
    Methods.ASHA: lambda method_arguments: ASHA(method_arguments, type="promotion"),
    Methods.MOBSTER: lambda method_arguments: MOBSTER(
        method_arguments, type="promotion"
    ),
}
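
The methods dictionary can be extended with further baselines in the same way. A minimal sketch (not part of the files above), assuming that BORE is exported by syne_tune.experiments.default_baselines in the installed Syne Tune version:

from syne_tune.experiments.default_baselines import BORE

# Hypothetical extension: register BORE so it can be selected by the launcher
# alongside RS, BO, ASHA, and MOBSTER
methods["BORE"] = lambda method_arguments: BORE(method_arguments)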

benchmarking/examples/launch_sagemaker/hpo_main.py
from benchmarking.examples.launch_sagemaker.baselines import methods
from benchmarking.benchmark_definitions import (
    real_benchmark_definitions as benchmark_definitions,
)
from syne_tune.experiments.launchers.hpo_main_sagemaker import main


if __name__ == "__main__":
    main(methods, benchmark_definitions)
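
main parses command-line arguments selecting methods, benchmarks, and seeds, then runs the experiments, with each trial executed as a SageMaker training job. The comparison can also be restricted programmatically. A minimal sketch, reusing the imports of hpo_main.py above, and assuming that real_benchmark_definitions accepts a sagemaker_backend flag, contains a resnet_cifar10 entry, and that main also accepts a plain dictionary of benchmark definitions (assumptions about the installed Syne Tune version):

if __name__ == "__main__":
    # Hypothetical variant of hpo_main.py: compare the methods on a single
    # real benchmark only
    all_benchmarks = benchmark_definitions(sagemaker_backend=True)
    main(methods, {"resnet_cifar10": all_benchmarks["resnet_cifar10"]})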

benchmarking/examples/launch_sagemaker/launch_remote.py
from pathlib import Path

import benchmarking
from benchmarking.benchmark_definitions import (
    real_benchmark_definitions as benchmark_definitions,
)
from benchmarking.examples.launch_sagemaker.baselines import methods
from syne_tune.experiments.launchers.launch_remote_sagemaker import launch_remote


if __name__ == "__main__":
    entry_point = Path(__file__).parent / "hpo_main.py"
    launch_remote(
        entry_point=entry_point,
        methods=methods,
        benchmark_definitions=benchmark_definitions,
        source_dependencies=benchmarking.__path__,
    )
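
launch_remote starts the experiments remotely, each running hpo_main.py inside a SageMaker training job, with the benchmarking sources made available through source_dependencies. If only part of the comparison is needed, a subset of the methods dictionary can be passed. A minimal sketch, reusing the imports and the entry_point defined in launch_remote.py above, plus the Methods class from baselines.py:

from benchmarking.examples.launch_sagemaker.baselines import Methods

# Hypothetical variant: launch only the multi-fidelity methods
subset = {name: methods[name] for name in (Methods.ASHA, Methods.MOBSTER)}
launch_remote(
    entry_point=entry_point,
    methods=subset,
    benchmark_definitions=benchmark_definitions,
    source_dependencies=benchmarking.__path__,
)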

benchmarking/examples/launch_sagemaker/requirements.txt
syne-tune[gpsearchers,aws]
tqdm