Commit

added tests for bnn train/predict
Moritz Freidank committed Oct 17, 2017
1 parent 1173790 commit f94adb3
Showing 15 changed files with 361 additions and 346 deletions.
106 changes: 0 additions & 106 deletions plot_ess.py

This file was deleted.

121 changes: 120 additions & 1 deletion pysgmcmc/diagnostics/objective_functions.py
@@ -61,31 +61,86 @@ def sinc(x):
# XXX Merge/write documentation

def bohachevski(x):
"""
Examples
-------
>>> import numpy as np
>>> optima, f_opt = [[0., 0.]], 0.0
>>> np.allclose([bohachevski(optimum) for optimum in optima], f_opt)
True
"""

y = 0.7 + x[0] ** 2 + 2.0 * x[1] ** 2
y -= 0.3 * np.cos(3.0 * np.pi * x[0])
y -= 0.4 * np.cos(4.0 * np.pi * x[1])
return y


def branin(x):
"""
Examples
-------
>>> import numpy as np
>>> optima = [[-np.pi, 12.275], [np.pi, 2.275], [9.42478, 2.475]]
>>> f_opt = 0.39788735773
>>> np.allclose([branin(optimum) for optimum in optima], f_opt)
True
"""
y = (x[1] - (5.1 / (4 * np.pi ** 2)) * x[0] ** 2 + 5 * x[0] / np.pi - 6) ** 2
y += 10 * (1 - 1 / (8 * np.pi)) * np.cos(x[0]) + 10
return y


def camelback(x):
"""
Examples
-------
>>> import numpy as np
>>> optima = [[0.0898, -0.7126], [-0.0898, 0.7126]]
>>> f_opt = -1.03162842
>>> np.allclose([camelback(optimum) for optimum in optima], f_opt)
True
"""
y = (4 - 2.1 * (x[0] ** 2) + ((x[0] ** 4) / 3)) * \
(x[0] ** 2) + x[0] * x[1] + (-4 + 4 * (x[1] ** 2)) * (x[1] ** 2)
return y


def goldstein_price(x):
"""
Examples
-------
>>> import numpy as np
>>> optima = [[0.0, -1.0]]
>>> f_opt = 3.
>>> np.allclose([goldstein_price(optimum) for optimum in optima], f_opt)
True
"""
y = (1 + (x[0] + x[1] + 1) ** 2 * (19 - 14 * x[0] + 3 * x[0] ** 2 - 14 * x[1] + 6 * x[0] * x[1] + 3 * x[1] ** 2))\
* (30 + (2 * x[0] - 3 * x[1]) ** 2 * (18 - 32 * x[0] + 12 * x[0] ** 2 + 48 * x[1] - 36 * x[0] * x[1] + 27 * x[1] ** 2))
return y


def hartmann3(x):
"""
Examples
-------
>>> import numpy as np
>>> optima = [[0.114614, 0.555649, 0.852547]]
>>> f_opt = -3.8627795317627736
>>> np.allclose([hartmann3(optimum) for optimum in optima], f_opt)
True
"""
alpha = [1.0, 1.2, 3.0, 3.2]
A = np.array([[3.0, 10.0, 30.0],
[0.1, 10.0, 35.0],
@@ -105,6 +160,17 @@ def hartmann3(x):


def hartmann6(x):
"""
Examples
-------
>>> import numpy as np
>>> optima = [[0.20169, 0.150011, 0.476874, 0.275332, 0.311652, 0.6573]]
>>> f_opt = -3.322368011391339
>>> np.allclose([hartmann6(optimum) for optimum in optima], f_opt)
True
"""
alpha = [1.00, 1.20, 3.00, 3.20]
A = np.array([[10.00, 3.00, 17.00, 3.50, 1.70, 8.00],
[0.05, 10.00, 17.00, 0.10, 8.00, 14.00],
@@ -125,13 +191,35 @@ def hartmann6(x):


def levy(x):
"""
Examples
-------
>>> import numpy as np
>>> optima = [[1.0]]
>>> f_opt = 0.0
>>> np.allclose([levy(optimum) for optimum in optima], f_opt)
True
"""
z = 1 + ((x[0] - 1.) / 4.)
s = np.power((np.sin(np.pi * z)), 2)
y = (s + ((z - 1) ** 2) * (1 + np.power((np.sin(2 * np.pi * z)), 2)))
return y


def rosenbrock(x):
"""
Examples
-------
>>> import numpy as np
>>> optima = [[1, 1]]
>>> f_opt = 0.0
>>> np.allclose([rosenbrock(optimum) for optimum in optima], f_opt)
True
"""
y = 0
d = 2
for i in range(d - 1):
@@ -142,10 +230,41 @@ def rosenbrock(x):


def sin_one(x):
"""
One dimensional sin function introduced in the paper:
K. Kawaguchi, L. P. Kaelbling, and T. Lozano-Perez.
Bayesian Optimization with Exponential Convergence.
In Advances in Neural Information Processing Systems (NIPS), 2015
Examples
-------
>>> import numpy as np
>>> optima = [[0.6330131633013163]]
>>> f_opt = 0.042926342433644127
>>> np.allclose([sin_one(optimum) for optimum in optima], f_opt)
True
"""
y = 0.5 * np.sin(13 * x[0]) * np.sin(27 * x[0]) + 0.5
return y


def sin_two(x):
"""
Two dimensional sin function introduced in the paper:
K. Kawaguchi, L. P. Kaelbling, and T. Lozano-Perez.
Bayesian Optimization with Exponential Convergence.
In Advances in Neural Information Processing Systems (NIPS), 2015
Examples
-------
>>> import numpy as np
>>> optima = [[0.6330131633013163, 0.6330131633013163]]
>>> f_opt = 0.042926342433644127 ** 2
>>> np.allclose([sin_two(optimum) for optimum in optima], f_opt)
True
"""
y = (0.5 * np.sin(13 * x[0]) * np.sin(27 * x[0]) + 0.5) * (0.5 * np.sin(13 * x[1]) * np.sin(27 * x[1]) + 0.5)
return y
1 change: 1 addition & 0 deletions pysgmcmc/diagnostics/sample_chains.py
@@ -56,6 +56,7 @@ def __init__(self, chain_id, samples, varnames=None):
['0', '1']
"""

self.chain = chain_id

assert(hasattr(samples, "__len__")), "Samples needs to have a __len__ attribute."
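
For context, a minimal construction sketch (not part of this commit). The class name is not visible in this hunk, so `PYSGMCMCTrace` is an assumption, as are the sample values; the assert above only requires that `samples` supports `__len__`.

import numpy as np
# Class name assumed; only the module path is visible in this diff.
from pysgmcmc.diagnostics.sample_chains import PYSGMCMCTrace

samples = np.asarray([[1.0, 2.0], [1.1, 2.1]])  # anything with __len__ passes the assert
trace = PYSGMCMCTrace(chain_id=0, samples=samples, varnames=["x_0", "x_1"])
print(trace.chain)  # 0, stored by the newly added `self.chain = chain_id`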
6 changes: 4 additions & 2 deletions pysgmcmc/diagnostics/sampler_diagnostics.py
@@ -93,7 +93,9 @@ def effective_sample_sizes(get_sampler, n_chains=2, samples_per_chain=100):
>>> from pysgmcmc.samplers.sghmc import SGHMCSampler
>>> params = [tf.Variable([1.0, 2.0], name="x", dtype=tf.float64)]
>>> cost_fun = lambda params: tf.reduce_sum(params) # dummy cost functions
>>> get_sampler = lambda session: SGHMCSampler(
...     params=params, cost_fun=cost_fun, session=session
... )
>>> ess_vals = effective_sample_sizes(get_sampler=get_sampler)
>>> type(ess_vals)
<class 'dict'>
@@ -112,7 +114,7 @@ def effective_sample_sizes(get_sampler, n_chains=2, samples_per_chain=100):


def gelman_rubin(get_sampler, n_chains=2, samples_per_chain=100):
"""
r"""
Calculate gelman_rubin metric for a sampler returned by callable `get_sampler`.
To do so, extract `n_chains` traces with `samples_per_chain` samples each.
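
A minimal usage sketch (not part of this commit), mirroring the effective_sample_sizes doctest above; the sampler setup is copied from that doctest and the return value is only printed, not interpreted.

import tensorflow as tf

from pysgmcmc.diagnostics.sampler_diagnostics import gelman_rubin
from pysgmcmc.samplers.sghmc import SGHMCSampler

# Same dummy setup as in the effective_sample_sizes doctest above.
params = [tf.Variable([1.0, 2.0], name="x", dtype=tf.float64)]
cost_fun = lambda params: tf.reduce_sum(params)  # dummy cost function
get_sampler = lambda session: SGHMCSampler(
    params=params, cost_fun=cost_fun, session=session
)

rhat = gelman_rubin(get_sampler=get_sampler, n_chains=2, samples_per_chain=100)
print(rhat)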
