
Typed Optimization #531


Merged: 26 commits (Dec 27, 2024)
Changes shown below are from 1 commit.

Commits:
cf963b1
WIP
till-m Nov 9, 2022
81321f3
Add ML example
till-m Nov 9, 2022
4106850
Save for merge
till-m May 23, 2023
5a3f2de
Merge remote-tracking branch 'origin/master' into parameter-types
till-m May 23, 2023
ac7f253
Merge remote-tracking branch 'origin/master' into parameter-types
till-m May 25, 2023
0ff88fc
Merge branch 'master' into parameter-types
till-m Oct 1, 2024
5d34efa
Update
till-m Oct 6, 2024
2b64ff0
Parameter types more (#13)
phi-friday Oct 9, 2024
3920e0f
Use `.masks` not `._masks`
till-m Oct 9, 2024
241e5c7
Use `super` to call kernel
till-m Oct 9, 2024
68909ad
Update logging for parameters
till-m Oct 12, 2024
1a03b05
Disable SDR when non-float parameters are present
till-m Oct 12, 2024
f17c96a
Add demo script for typed optimization
till-m Oct 12, 2024
3c4c298
Update parameters, testing
till-m Oct 15, 2024
264b79e
Remove sorting, gradient optimize only continuous params
till-m Oct 29, 2024
b97c11e
Go back to `wrap_kernel`
till-m Oct 29, 2024
9543fb8
Update code
till-m Oct 30, 2024
7c84390
Remove `tqdm` dependency, use EI acq
till-m Nov 1, 2024
f1e4493
Add more text to typed optimization notebook.
till-m Nov 1, 2024
b765b5d
Merge branch 'master' into parameter-types
till-m Nov 1, 2024
187fd08
Save files while moving device
till-m Nov 15, 2024
31223a9
Update with custom parameter type example
till-m Dec 10, 2024
4476271
Merge branch 'master' into parameter-types
till-m Dec 18, 2024
9b1fbc1
Mention that parameters are not sorted
till-m Dec 18, 2024
1a54e1b
Change array reg warning
till-m Dec 18, 2024
05fbbcd
Update Citations, parameter notebook
till-m Dec 25, 2024
Save files while moving device
till-m committed Nov 15, 2024
commit 187fd08b1e2930fb29560060084aa18dd421a3a7
2 changes: 0 additions & 2 deletions bayes_opt/__init__.py
@@ -9,7 +9,6 @@
from bayes_opt.constraint import ConstraintModel
from bayes_opt.domain_reduction import SequentialDomainReductionTransformer
from bayes_opt.logger import JSONLogger, ScreenLogger
from bayes_opt.parameter import BayesParameter
from bayes_opt.target_space import TargetSpace

__version__ = importlib.metadata.version("bayesian-optimization")
@@ -24,5 +23,4 @@
"ScreenLogger",
"JSONLogger",
"SequentialDomainReductionTransformer",
"BayesParameter",
]
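
Note: with this change, `BayesParameter` is no longer re-exported from the package root. A sketch of the import that should still work after this commit (an assumption based on the unchanged submodule, not something shown in this diff):

    # hypothetical usage after this commit: import from the submodule instead
    # of `from bayes_opt import BayesParameter`, which this commit removes
    from bayes_opt.parameter import BayesParameter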
6 changes: 3 additions & 3 deletions bayes_opt/parameter.py
@@ -477,7 +477,7 @@ def wrap_kernel(kernel: kernels.Kernel, transform: Callable[[Any], Any]) -> kern
kernel_type = type(kernel)

class WrappedKernel(kernel_type):
@copy_signature(getattr(kernel_type.__init__, "deprecated_original", kernel_type.__init__))
@_copy_signature(getattr(kernel_type.__init__, "deprecated_original", kernel_type.__init__))
def __init__(self, **kwargs: Any) -> None:
super().__init__(**kwargs)

@@ -492,8 +492,8 @@ def __reduce__(self) -> str | tuple[Any, ...]:
return WrappedKernel(**kernel.get_params())


def copy_signature(source_fct: Callable[..., Any]) -> Callable[[Callable[..., Any]], Callable[..., Any]]:
"""Clones a signature from a source function to a target function.
def _copy_signature(source_fct: Callable[..., Any]) -> Callable[[Callable[..., Any]], Callable[..., Any]]:
"""Clone a signature from a source function to a target function.

via
https://stackoverflow.com/a/58989918/
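
For context, a minimal sketch of what this decorator does, following the StackOverflow answer linked above (an illustration, not the exact implementation):

    import inspect
    from typing import Any, Callable

    def copy_signature_sketch(
        source_fct: Callable[..., Any],
    ) -> Callable[[Callable[..., Any]], Callable[..., Any]]:
        def decorator(target_fct: Callable[..., Any]) -> Callable[..., Any]:
            # Advertise the source function's signature on the target so that
            # inspect.signature() (and hence sklearn's parameter introspection
            # on WrappedKernel.__init__) reports the wrapped kernel's parameters.
            target_fct.__signature__ = inspect.signature(source_fct)
            return target_fct
        return decorator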
11 changes: 11 additions & 0 deletions bayes_opt/target_space.py
@@ -242,22 +242,33 @@ def make_params(self, pbounds: BoundsMapping) -> dict[str, BayesParameter]:
A dictionary with the parameter names as keys and the corresponding
parameter objects as values.
"""
any_is_not_float = False # TODO: remove in an upcoming release
params: dict[str, BayesParameter] = {}
for key in pbounds:
pbound = pbounds[key]

if isinstance(pbound, BayesParameter):
res = pbound
if not isinstance(pbound, FloatParameter):
any_is_not_float = True
elif (len(pbound) == 2 and is_numeric(pbound[0]) and is_numeric(pbound[1])) or (
len(pbound) == 3 and pbound[-1] is float
):
res = FloatParameter(name=key, bounds=(float(pbound[0]), float(pbound[1])))
elif len(pbound) == 3 and pbound[-1] is int:
res = IntParameter(name=key, bounds=(int(pbound[0]), int(pbound[1])))
any_is_not_float = True
else:
# assume categorical variable with pbound as list of possible values
res = CategoricalParameter(name=key, categories=pbound)
any_is_not_float = True
params[key] = res
if any_is_not_float:
msg = (
"Non-float parameters are experimental and may not work as expected."
" Exercise caution when using them and please report any issues you encounter."
)
warn(msg, stacklevel=4)
return params

def make_masks(self) -> dict[str, NDArray[np.bool_]]:
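
To make the dispatch in `make_params` concrete, a hypothetical `pbounds` exercising each branch (parameter names invented for illustration):

    pbounds = {
        "x": (-2.0, 2.0),                # two numeric bounds        -> FloatParameter
        "y": (-3, 3, float),             # 3-tuple ending in float   -> FloatParameter
        "n_layers": (1, 8, int),         # 3-tuple ending in int     -> IntParameter
        "activation": ("relu", "tanh"),  # anything else             -> CategoricalParameter
    }

Passing `n_layers` or `activation` here would set `any_is_not_float` and emit the "Non-float parameters are experimental..." warning added in this commit.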
8 changes: 6 additions & 2 deletions docsrc/index.rst
@@ -11,6 +11,7 @@
Basic Tour </basic-tour>
Advanced Tour </advanced-tour>
Constrained Bayesian Optimization </constraints>
Parameter Types </parameter_types>
Sequential Domain Reduction </domain_reduction>
Acquisition Functions </acquisition_functions>
Exploration vs. Exploitation </exploitation_vs_exploration>
@@ -26,6 +27,7 @@
reference/constraint
reference/domain_reduction
reference/target_space
reference/parameter
reference/exception
reference/other

@@ -121,11 +123,13 @@ section. We suggest that you:
to learn how to use the package's most important features.
- Take a look at the `advanced tour
notebook <advanced-tour.html>`__
to learn how to make the package more flexible, how to deal with
categorical parameters, how to use observers, and more.
to learn how to make the package more flexible or how to use observers.
- To learn more about acquisition functions, a central building block
of bayesian optimization, see the `acquisition functions
notebook <acquisition_functions.html>`__
- If you want to optimize over integer-valued or categorical
parameters, see the `parameter types
notebook <parameter_types.html>`__.
- Check out this
`notebook <visualization.html>`__
with a step by step visualization of how this method works.
5 changes: 5 additions & 0 deletions docsrc/reference/parameter.rst
@@ -0,0 +1,5 @@
:py:mod:`bayes_opt.parameter`
--------------------------------

.. automodule:: bayes_opt.parameter
:members:
161 changes: 28 additions & 133 deletions examples/advanced-tour.ipynb
@@ -96,7 +96,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"Next point to probe is: {'x': -0.331911981189704, 'y': 1.3219469606529486}\n"
"Next point to probe is: {'x': np.float64(-0.331911981189704), 'y': np.float64(1.3219469606529486)}\n"
]
}
],
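
The changed output above looks like NumPy 2's new scalar repr rather than a behavioral change. A quick illustration (assuming NumPy >= 2.0):

    import numpy as np

    x = np.float64(-0.331911981189704)
    repr(x)   # 'np.float64(-0.331911981189704)' on NumPy >= 2.0
    float(x)  # plain Python float, prints like the old output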
@@ -167,12 +167,12 @@
"name": "stdout",
"output_type": "stream",
"text": [
"-18.503835804889988 {'x': 1.953072105336, 'y': -2.9609778030491904}\n",
"-1.0819533157901717 {'x': 0.22703572807626315, 'y': 2.4249238905875123}\n",
"-6.50219704520679 {'x': -1.9991881984624875, 'y': 2.872282989383577}\n",
"-5.747604713731052 {'x': -1.994467585936897, 'y': -0.664242699361514}\n",
"-2.9682431497650823 {'x': 1.9737252084307952, 'y': 1.269540259274744}\n",
"{'target': 0.7861845912690544, 'params': {'x': -0.331911981189704, 'y': 1.3219469606529486}}\n"
"-18.707136686093495 {'x': np.float64(1.9261486197444082), 'y': np.float64(-2.9996360060323246)}\n",
"0.750594563473972 {'x': np.float64(-0.3763326769822668), 'y': np.float64(1.328297354179696)}\n",
"-6.559031075654336 {'x': np.float64(1.979183535803597), 'y': np.float64(2.9083667381450318)}\n",
"-6.915481333972961 {'x': np.float64(-1.9686133847781613), 'y': np.float64(-1.009985740060171)}\n",
"-6.8600832617014085 {'x': np.float64(-1.9763198875239296), 'y': np.float64(2.9885278383464513)}\n",
"{'target': np.float64(0.7861845912690544), 'params': {'x': np.float64(-0.331911981189704), 'y': np.float64(1.3219469606529486)}}\n"
]
}
],
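
For reference, a sketch of the suggest/evaluate/register loop that produces output like the above (an assumption: the v2-style API where `suggest()` takes no utility argument; `black_box_function` is the notebook's objective):

    for _ in range(5):
        next_point = optimizer.suggest()            # dict of parameter values
        target = black_box_function(**next_point)   # evaluate outside the optimizer
        optimizer.register(params=next_point, target=target)
        print(target, next_point)

    print(optimizer.max)  # best observation so far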
@@ -190,112 +190,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"## 2. Dealing with discrete parameters\n",
"\n",
"**There is no principled way of dealing with discrete parameters using this package.**\n",
"\n",
"Ok, now that we got that out of the way, how do you do it? You're bound to be in a situation where some of your function's parameters may only take on discrete values. Unfortunately, the nature of bayesian optimization with gaussian processes doesn't allow for an easy/intuitive way of dealing with discrete parameters - but that doesn't mean it is impossible. The example below showcases a simple, yet reasonably adequate, way to dealing with discrete parameters."
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"def func_with_discrete_params(x, y, d):\n",
" # Simulate necessity of having d being discrete.\n",
" assert type(d) == int\n",
" \n",
" return ((x + y + d) // (1 + d)) / (1 + (x + y) ** 2)"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
"def function_to_be_optimized(x, y, w):\n",
" d = int(w)\n",
" return func_with_discrete_params(x, y, d)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"optimizer = BayesianOptimization(\n",
" f=function_to_be_optimized,\n",
" pbounds={'x': (-10, 10), 'y': (-10, 10), 'w': (0, 5)},\n",
" verbose=2,\n",
" random_state=1,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"| iter | target | w | x | y |\n",
"-------------------------------------------------------------\n",
"| \u001b[30m1 | \u001b[30m-0.06199 | \u001b[30m2.085 | \u001b[30m4.406 | \u001b[30m-9.998 |\n",
"| \u001b[35m2 | \u001b[35m-0.0344 | \u001b[35m1.512 | \u001b[35m-7.065 | \u001b[35m-8.153 |\n",
"| \u001b[30m3 | \u001b[30m-0.2177 | \u001b[30m0.9313 | \u001b[30m-3.089 | \u001b[30m-2.065 |\n",
"| \u001b[35m4 | \u001b[35m0.1865 | \u001b[35m2.694 | \u001b[35m-1.616 | \u001b[35m3.704 |\n",
"| \u001b[30m5 | \u001b[30m-0.2187 | \u001b[30m1.022 | \u001b[30m7.562 | \u001b[30m-9.452 |\n",
"| \u001b[35m6 | \u001b[35m0.2488 | \u001b[35m2.684 | \u001b[35m-2.188 | \u001b[35m3.925 |\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"| \u001b[35m7 | \u001b[35m0.2948 | \u001b[35m2.683 | \u001b[35m-2.534 | \u001b[35m4.08 |\n",
"| \u001b[35m8 | \u001b[35m0.3202 | \u001b[35m2.514 | \u001b[35m-3.83 | \u001b[35m5.287 |\n",
"| \u001b[30m9 | \u001b[30m0.0 | \u001b[30m4.057 | \u001b[30m-4.458 | \u001b[30m3.928 |\n",
"| \u001b[35m10 | \u001b[35m0.4802 | \u001b[35m2.296 | \u001b[35m-3.518 | \u001b[35m4.558 |\n",
"| \u001b[30m11 | \u001b[30m0.0 | \u001b[30m1.084 | \u001b[30m-3.737 | \u001b[30m4.472 |\n",
"| \u001b[30m12 | \u001b[30m0.0 | \u001b[30m2.649 | \u001b[30m-3.861 | \u001b[30m4.353 |\n",
"| \u001b[30m13 | \u001b[30m0.0 | \u001b[30m2.442 | \u001b[30m-3.658 | \u001b[30m4.599 |\n",
"| \u001b[30m14 | \u001b[30m-0.05801 | \u001b[30m1.935 | \u001b[30m-0.4758 | \u001b[30m-8.755 |\n",
"| \u001b[30m15 | \u001b[30m0.0 | \u001b[30m2.337 | \u001b[30m7.973 | \u001b[30m-8.96 |\n",
"| \u001b[30m16 | \u001b[30m0.07699 | \u001b[30m0.6926 | \u001b[30m5.59 | \u001b[30m6.854 |\n",
"| \u001b[30m17 | \u001b[30m-0.02025 | \u001b[30m3.534 | \u001b[30m-8.943 | \u001b[30m1.987 |\n",
"| \u001b[30m18 | \u001b[30m0.0 | \u001b[30m2.59 | \u001b[30m-7.339 | \u001b[30m5.941 |\n",
"| \u001b[30m19 | \u001b[30m0.0929 | \u001b[30m2.237 | \u001b[30m-4.535 | \u001b[30m9.065 |\n",
"| \u001b[30m20 | \u001b[30m0.1538 | \u001b[30m0.477 | \u001b[30m2.931 | \u001b[30m2.683 |\n",
"| \u001b[30m21 | \u001b[30m0.0 | \u001b[30m0.9999 | \u001b[30m4.397 | \u001b[30m-3.971 |\n",
"| \u001b[30m22 | \u001b[30m-0.01894 | \u001b[30m3.764 | \u001b[30m-7.043 | \u001b[30m-3.184 |\n",
"| \u001b[30m23 | \u001b[30m0.03683 | \u001b[30m1.851 | \u001b[30m5.783 | \u001b[30m7.966 |\n",
"| \u001b[30m24 | \u001b[30m-0.04359 | \u001b[30m1.615 | \u001b[30m-5.133 | \u001b[30m-6.556 |\n",
"| \u001b[30m25 | \u001b[30m0.02617 | \u001b[30m3.863 | \u001b[30m0.1052 | \u001b[30m8.579 |\n",
"| \u001b[30m26 | \u001b[30m-0.1071 | \u001b[30m0.8131 | \u001b[30m-0.7949 | \u001b[30m-9.292 |\n",
"| \u001b[30m27 | \u001b[30m0.0 | \u001b[30m4.969 | \u001b[30m8.778 | \u001b[30m-8.467 |\n",
"| \u001b[30m28 | \u001b[30m-0.1372 | \u001b[30m0.9475 | \u001b[30m-1.019 | \u001b[30m-7.018 |\n",
"| \u001b[30m29 | \u001b[30m0.08078 | \u001b[30m1.917 | \u001b[30m-0.2606 | \u001b[30m6.272 |\n",
"| \u001b[30m30 | \u001b[30m0.02003 | \u001b[30m4.278 | \u001b[30m3.8 | \u001b[30m8.398 |\n",
"=============================================================\n"
]
}
],
"source": [
"optimizer.set_gp_params(alpha=1e-3)\n",
"optimizer.maximize()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 3. Tuning the underlying Gaussian Process\n",
"## 2. Tuning the underlying Gaussian Process\n",
"\n",
"The bayesian optimization algorithm works by performing a gaussian process regression of the observed combination of parameters and their associated target values. The predicted parameter $\\rightarrow$ target hyper-surface (and its uncertainty) is then used to guide the next best point to probe."
]
@@ -304,14 +199,14 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"### 3.1 Passing parameter to the GP\n",
"### 2.1 Passing parameter to the GP\n",
"\n",
"Depending on the problem it could be beneficial to change the default parameters of the underlying GP. You can use the `optimizer.set_gp_params` method to do this:"
]
},
{
"cell_type": "code",
"execution_count": 13,
"execution_count": 9,
"metadata": {},
"outputs": [
{
@@ -320,12 +215,12 @@
"text": [
"| iter | target | x | y |\n",
"-------------------------------------------------\n",
"| \u001b[30m1 | \u001b[30m0.7862 | \u001b[30m-0.3319 | \u001b[30m1.322 |\n",
"| \u001b[30m2 | \u001b[30m-18.19 | \u001b[30m1.957 | \u001b[30m-2.919 |\n",
"| \u001b[30m3 | \u001b[30m-12.05 | \u001b[30m-1.969 | \u001b[30m-2.029 |\n",
"| \u001b[30m4 | \u001b[30m-7.463 | \u001b[30m0.6032 | \u001b[30m-1.846 |\n",
"| \u001b[30m5 | \u001b[30m-1.093 | \u001b[30m1.444 | \u001b[30m1.096 |\n",
"| \u001b[35m6 | \u001b[35m0.8586 | \u001b[35m-0.2165 | \u001b[35m1.307 |\n",
"| \u001b[39m1 \u001b[39m | \u001b[39m0.7862 \u001b[39m | \u001b[39m-0.331911\u001b[39m | \u001b[39m1.3219469\u001b[39m |\n",
"| \u001b[39m2 \u001b[39m | \u001b[39m-18.34 \u001b[39m | \u001b[39m1.9021640\u001b[39m | \u001b[39m-2.965222\u001b[39m |\n",
"| \u001b[35m3 \u001b[39m | \u001b[35m0.8731 \u001b[39m | \u001b[35m-0.298167\u001b[39m | \u001b[35m1.1948749\u001b[39m |\n",
"| \u001b[39m4 \u001b[39m | \u001b[39m-6.497 \u001b[39m | \u001b[39m1.9876938\u001b[39m | \u001b[39m2.8830942\u001b[39m |\n",
"| \u001b[39m5 \u001b[39m | \u001b[39m-4.286 \u001b[39m | \u001b[39m-1.995643\u001b[39m | \u001b[39m-0.141769\u001b[39m |\n",
"| \u001b[39m6 \u001b[39m | \u001b[39m-6.781 \u001b[39m | \u001b[39m-1.953302\u001b[39m | \u001b[39m2.9913127\u001b[39m |\n",
"=================================================\n"
]
}
@@ -348,7 +243,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"### 3.2 Tuning the `alpha` parameter\n",
"### 2.2 Tuning the `alpha` parameter\n",
"\n",
"When dealing with functions with discrete parameters,or particularly erratic target space it might be beneficial to increase the value of the `alpha` parameter. This parameters controls how much noise the GP can handle, so increase it whenever you think that extra flexibility is needed."
]
Expand All @@ -358,7 +253,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"### 3.3 Changing kernels\n",
"### 2.3 Changing kernels\n",
"\n",
"By default this package uses the Matern 2.5 kernel. Depending on your use case you may find that tuning the GP kernel could be beneficial. You're on your own here since these are very specific solutions to very specific problems. You should start with the [scikit learn docs](https://scikit-learn.org/stable/modules/gaussian_process.html#kernels-for-gaussian-processes)."
]
@@ -376,7 +271,7 @@
},
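
A hypothetical sketch of swapping the kernel, assuming `set_gp_params` forwards keyword arguments to the underlying scikit-learn GaussianProcessRegressor:

    from sklearn.gaussian_process.kernels import RBF

    # replace the default Matern 2.5 kernel with an RBF kernel
    optimizer.set_gp_params(kernel=RBF(length_scale=1.0))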
{
"cell_type": "code",
"execution_count": 14,
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
@@ -385,7 +280,7 @@
},
{
"cell_type": "code",
"execution_count": 15,
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
@@ -399,7 +294,7 @@
},
{
"cell_type": "code",
"execution_count": 16,
"execution_count": 12,
"metadata": {},
"outputs": [],
"source": [
@@ -411,7 +306,7 @@
},
{
"cell_type": "code",
"execution_count": 17,
"execution_count": 13,
"metadata": {},
"outputs": [],
"source": [
@@ -433,7 +328,7 @@
},
{
"cell_type": "code",
"execution_count": 18,
"execution_count": 14,
"metadata": {},
"outputs": [],
"source": [
@@ -449,7 +344,7 @@
},
{
"cell_type": "code",
"execution_count": 19,
"execution_count": 15,
"metadata": {},
"outputs": [
{
@@ -476,7 +371,7 @@
},
{
"cell_type": "code",
"execution_count": 20,
"execution_count": 16,
"metadata": {},
"outputs": [
{
@@ -485,7 +380,7 @@
"['optimization:start', 'optimization:step', 'optimization:end']"
]
},
"execution_count": 20,
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
@@ -497,7 +392,7 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"display_name": "bayesian-optimization-t6LLJ9me-py3.10",
"language": "python",
"name": "python3"
},
@@ -511,7 +406,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.1.undefined"
"version": "3.10.13"
},
"nbdime-conflicts": {
"local_diff": [