Skip to content

Commit

Permalink
feat: convert between trials and df
Browse files Browse the repository at this point in the history
  • Loading branch information
songlei00 committed Nov 4, 2024
1 parent feef1f9 commit 187827b
Show file tree
Hide file tree
Showing 3 changed files with 75 additions and 0 deletions.
7 changes: 7 additions & 0 deletions bbo/algorithms/bo.py
Original file line number Diff line number Diff line change
Expand Up @@ -263,3 +263,10 @@ def suggest(self, count: Optional[int]=None) -> Sequence[Trial]:

def update(self, completed: Sequence[Trial]) -> None:
    """Append a batch of completed trials to the optimizer's history."""
    for trial in completed:
        self._trials.append(trial)

@property
def history(self) -> Sequence[Trial]:
    """All trials observed so far (suggested and completed via `update`).

    NOTE(review): returns the internal list itself, not a copy — callers
    can mutate the optimizer's history through it. Confirm this aliasing
    is intended before relying on it.
    """
    return self._trials

def set_history(self, trials: "Sequence[Trial]"):
    """Replace the optimizer's trial history with the given trials.

    Copies into a fresh list so that (a) later `update` calls, which
    mutate the history via `list.extend`, keep working even when the
    caller passes a tuple or other non-list Sequence, and (b) the
    caller's sequence is not aliased as internal state.
    """
    self._trials = list(trials)
33 changes: 33 additions & 0 deletions bbo/benchmarks/analyzers/utils.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
from typing import Sequence

import pandas as pd

from bbo.utils.trial import Trial, ParameterDict, MetricDict
from bbo.utils.problem_statement import ProblemStatement


def trials2df(trials: "Sequence[Trial]") -> "pd.DataFrame":
    """Convert a sequence of completed trials into a pandas DataFrame.

    One row per trial; parameter columns come first, followed by metric
    columns, each keyed by its name. Column membership is taken from the
    first trial — all trials are assumed to share the same parameter and
    metric names.

    Args:
        trials: completed trials carrying `parameters` and `metrics`
            mappings of name -> object with a `.value` attribute.

    Returns:
        A DataFrame with one column per parameter and per metric.
        Empty DataFrame when `trials` is empty (previously raised
        IndexError on `trials[0]`).
    """
    if not trials:
        return pd.DataFrame()
    columns = {}
    for name in trials[0].parameters:
        columns[name] = [t.parameters[name].value for t in trials]
    for name in trials[0].metrics:
        columns[name] = [t.metrics[name].value for t in trials]
    return pd.DataFrame(columns)

def df2trials(df: pd.DataFrame, problem_statement: ProblemStatement):
    """Reconstruct a list of Trial objects from a DataFrame.

    Inverse of `trials2df`: each DataFrame row becomes one Trial whose
    parameters and metrics are looked up by the names declared in
    `problem_statement`. Iterates column-wise (rather than row-wise) so
    each column keeps its own dtype.
    """
    num_rows = len(df)
    search_space = problem_statement.search_space
    objective = problem_statement.objective
    params_per_row = [ParameterDict() for _ in range(num_rows)]
    metrics_per_row = [MetricDict() for _ in range(num_rows)]
    for param_name in search_space.parameter_configs:
        for row_idx, value in enumerate(df[param_name]):
            params_per_row[row_idx][param_name] = value
    for metric_name in objective.metric_informations:
        for row_idx, value in enumerate(df[metric_name]):
            metrics_per_row[row_idx][metric_name] = value
    return [
        Trial(parameters=params, metrics=metrics)
        for params, metrics in zip(params_per_row, metrics_per_row)
    ]
35 changes: 35 additions & 0 deletions tests/benchmarks/utils_test.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
import unittest
import random

from bbo.utils.parameter_config import SearchSpace, ScaleType
from bbo.utils.metric_config import Objective, ObjectiveMetricGoal
from bbo.utils.problem_statement import ProblemStatement
from bbo.utils.trial import Trial, MetricDict
from bbo.benchmarks.analyzers.utils import trials2df, df2trials


class UtilsTest(unittest.TestCase):
    """Round-trip tests for the trials <-> DataFrame conversion helpers."""

    def setUp(self):
        # Search space covering every parameter kind the converters must handle.
        space = SearchSpace()
        space.add_float_param('float', 0, 10)
        space.add_float_param('float_linear', 0, 10, scale_type=ScaleType.LINEAR)
        space.add_int_param('int', 1, 10)
        space.add_discrete_param('discrete', [0, 2, 4, 6])
        space.add_categorical_param('categorical', ['a', 'b', 'c'])
        objective = Objective()
        objective.add_metric('obj1', ObjectiveMetricGoal.MAXIMIZE)
        objective.add_metric('obj2', ObjectiveMetricGoal.MAXIMIZE)
        self.problem_statement = ProblemStatement(space, objective)
        num_trials = 10
        self.trials = [Trial(space.sample()) for _ in range(num_trials)]
        # Complete each trial with jittered two-objective metric values.
        for idx, trial in enumerate(self.trials):
            trial.complete(MetricDict({
                'obj1': idx + random.uniform(-0.5, 0.5),
                'obj2': -idx + random.uniform(-0.5, 0.5)
            }))

    def test_trials2df(self):
        # Converting to a DataFrame and back should reproduce the trials.
        df = trials2df(self.trials)
        recovered = df2trials(df, self.problem_statement)
        self.assertEqual(self.trials, recovered)

0 comments on commit 187827b

Please sign in to comment.