Merge branch 'master' of https://github.com/optuna/optuna into feat-suggest-api
xadrianzetx committed Oct 11, 2021
2 parents 6d97a39 + 41451e1 commit 87ce89e
Showing 4 changed files with 14 additions and 36 deletions.
3 changes: 3 additions & 0 deletions README.md
@@ -24,6 +24,9 @@ Optuna can dynamically construct the search spaces for the hyperparameters.
## News

Help us create the next version of Optuna!

The Optuna 3.0 roadmap has been published for review. Please take a look at the [planned improvements to Optuna](https://github.com/optuna/optuna/wiki/Optuna-V3-Roadmap), and share your feedback in the GitHub issues. PR contributions are also welcome!

Please take a few minutes to fill in this survey, and let us know how you use Optuna now and what improvements you'd like. 🤔

All questions optional. 🙇‍♂️
5 changes: 5 additions & 0 deletions optuna/trial/_trial.py
@@ -506,6 +506,11 @@ def report(self, value: float, step: int) -> None:
function internally. Thus, it accepts all float-like types (e.g., ``numpy.float32``).
If the conversion fails, a ``TypeError`` is raised.
.. note::
If this method is called multiple times at the same ``step`` in a trial,
only the ``value`` reported the first time is stored; values reported
afterwards are ignored.
Example:
Report intermediate scores of `SGDClassifier <https://scikit-learn.org/stable/modules/
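For illustration, a minimal sketch (not part of this commit) of the behavior described in the note above — the second report at the same step is ignored:

```python
import optuna


def objective(trial: optuna.trial.Trial) -> float:
    # Report twice at the same step; per the note above, only the
    # first value (0.5) is stored and the second (0.9) is ignored.
    trial.report(0.5, step=0)
    trial.report(0.9, step=0)
    return 1.0


study = optuna.create_study()
study.optimize(objective, n_trials=1)
print(study.trials[0].intermediate_values)  # {0: 0.5}
```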
5 changes: 3 additions & 2 deletions tests/integration_tests/test_lightgbm.py
@@ -1,6 +1,7 @@
from functools import partial

import lightgbm as lgb
import numpy as np
import pytest

import optuna
@@ -116,8 +117,8 @@ def objective(
cv: bool = False,
) -> float:

dtrain = lgb.Dataset([[1.0], [2.0], [3.0]], label=[1.0, 0.0, 1.0])
dtest = lgb.Dataset([[1.0]], label=[1.0])
dtrain = lgb.Dataset(np.asarray([[1.0], [2.0], [3.0]]), label=[1.0, 0.0, 1.0])
dtest = lgb.Dataset(np.asarray([[1.0]]), label=[1.0])
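# The toy datasets are now constructed from numpy arrays rather than nested Python lists.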

if force_default_valid_names:
valid_names = None
37 changes: 3 additions & 34 deletions tests/integration_tests/test_pytorch_lightning.py
@@ -45,6 +45,8 @@ def validation_step( # type: ignore
return {"validation_accuracy": accuracy}

def validation_epoch_end(self, outputs: List[Dict[str, torch.Tensor]]) -> None:
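# An empty outputs list (possible e.g. when a process receives no validation batches) would make the mean below divide by zero.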
if not len(outputs):
return

accuracy = sum(x["validation_accuracy"] for x in outputs) / len(outputs)
self.log("accuracy", accuracy)
@@ -69,24 +71,10 @@ def _generate_dummy_dataset(self) -> torch.utils.data.DataLoader:
return torch.utils.data.DataLoader(dataset, batch_size=1)


class ModelDDP(pl.LightningModule):
class ModelDDP(Model):
def __init__(self) -> None:

super().__init__()
self._model = nn.Sequential(nn.Linear(4, 8))

def forward(self, data: torch.Tensor) -> torch.Tensor: # type: ignore

return self._model(data)

def training_step( # type: ignore
self, batch: List[torch.Tensor], batch_nb: int
) -> Dict[str, torch.Tensor]:

data, target = batch
output = self.forward(data)
loss = F.nll_loss(output, target)
return {"loss": loss}

def validation_step( # type: ignore
self, batch: List[torch.Tensor], batch_nb: int
@@ -104,25 +92,6 @@ def validation_step( # type: ignore

self.log("accuracy", accuracy, sync_dist=True)

def configure_optimizers(self) -> torch.optim.Optimizer:

return torch.optim.SGD(self._model.parameters(), lr=1e-2)

def train_dataloader(self) -> torch.utils.data.DataLoader:

return self._generate_dummy_dataset()

def val_dataloader(self) -> torch.utils.data.DataLoader:

return self._generate_dummy_dataset()

def _generate_dummy_dataset(self) -> torch.utils.data.DataLoader:

data = torch.zeros(3, 4, dtype=torch.float32)
target = torch.zeros(3, dtype=torch.int64)
dataset = torch.utils.data.TensorDataset(data, target)
return torch.utils.data.DataLoader(dataset, batch_size=1)


def test_pytorch_lightning_pruning_callback() -> None:
def objective(trial: optuna.trial.Trial) -> float:
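To make the refactor concrete, here is a reconstructed sketch — not verbatim from the repository, and assuming ``Model`` is the single-process LightningModule defined earlier in this test module — of what ``ModelDDP`` reduces to once it inherits from ``Model``: only the validation logging is overridden to aggregate across DDP processes, while ``forward``, ``training_step``, the optimizer, and the dataloaders are reused.

```python
from typing import List

import torch


class ModelDDP(Model):  # ``Model`` is the LightningModule defined above.
    def validation_step(  # type: ignore
        self, batch: List[torch.Tensor], batch_nb: int
    ) -> None:
        data, target = batch
        pred = self.forward(data).argmax(dim=1)
        accuracy = pred.eq(target).float().mean()
        # sync_dist=True reduces the metric across all DDP processes,
        # so every rank logs the same aggregated "accuracy" value.
        self.log("accuracy", accuracy, sync_dist=True)
```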
