Commit
Merge branch 'devel' of github.com:SheffieldML/GPy into devel
nfusi committed Jul 17, 2013
2 parents 8da0785 + 09ee9e5 commit fa523c3
Showing 7 changed files with 105 additions and 41 deletions.
3 changes: 2 additions & 1 deletion GPy/core/gp.py
@@ -96,7 +96,8 @@ def log_likelihood(self):
model for a new variable Y* = v_tilde/tau_tilde, with a covariance
matrix K* = K + diag(1./tau_tilde) plus a normalization term.
"""
return - 0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) - 0.5 * self.output_dim * self.K_logdet + self._model_fit_term() + self.likelihood.Z
return (-0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) -
0.5 * self.output_dim * self.K_logdet + self._model_fit_term() + self.likelihood.Z)
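Reading the reformatted return statement as an equation (a hedged transcription of the code; _model_fit_term and the likelihood's normaliser Z are defined elsewhere in this class):

    \log p(\mathbf{Y}) = -\tfrac{1}{2} N D \log(2\pi) - \tfrac{1}{2} D \log\lvert\mathbf{K}\rvert + \mathrm{model\_fit\_term} + Z

with N = num_data, D = output_dim, and K the covariance whose log-determinant is cached in K_logdet (for EP, the K* = K + diag(1/tau_tilde) described in the docstring above).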


def _log_likelihood_gradients(self):
6 changes: 3 additions & 3 deletions GPy/core/sparse_gp.py
@@ -108,7 +108,7 @@ def _computations(self):
self.B = np.eye(self.num_inducing) + self.A
self.LB = jitchol(self.B)

#VVT_factor is a matrix such that tdot(VVT_factor) = VVT...this is for efficiency!
# VVT_factor is a matrix such that tdot(VVT_factor) = VVT...this is for efficiency!
self.psi1Vf = np.dot(self.psi1.T, self.likelihood.VVT_factor)

# back substitute C into psi1Vf
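As a hedged aside on the VVT_factor comment above: GPy's tdot(A) computes A A^T (via a symmetric rank-k BLAS update), so carrying a thin factor of VV^T is cheaper than forming the full product. A minimal numpy sketch of the identity being relied on (the names and shapes here are illustrative, not the library's):

import numpy as np

def tdot(a):
    # Sketch of GPy.util.linalg.tdot: returns a @ a.T.
    return np.dot(a, a.T)

V_factor = np.random.randn(100, 3)   # thin factor, e.g. likelihood.VVT_factor
VVT = tdot(V_factor)                 # 100 x 100, equal to V_factor V_factor^T
# psi1Vf = psi1.T @ V_factor stays small (num_inducing x 3), which is the
# efficiency the comment refers to.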
@@ -163,7 +163,7 @@ def _computations(self):
def log_likelihood(self):
""" Compute the (lower bound on the) log marginal likelihood """
if self.likelihood.is_heteroscedastic:
A = -0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.likelihood.precision)) - 0.5 * np.sum(self.likelihood.V*self.likelihood.Y)
A = -0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.likelihood.precision)) - 0.5 * np.sum(self.likelihood.V * self.likelihood.Y)
B = -0.5 * self.output_dim * (np.sum(self.likelihood.precision.flatten() * self.psi0) - np.trace(self.A))
else:
A = -0.5 * self.num_data * self.output_dim * (np.log(2.*np.pi) - np.log(self.likelihood.precision)) - 0.5 * self.likelihood.precision * self.likelihood.trYYT
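Read as equations, the two branches above compute the following (a hedged transcription of the code, with beta the noise precision, V the precision-scaled targets, psi_0 the expected kernel diagonal, and tr(A) the trace of the matrix self.A built earlier in _computations — distinct from the scalar term A here):

    A_{\mathrm{het}} = -\tfrac{1}{2} N D \log(2\pi) + \tfrac{1}{2}\sum_n \log\beta_n - \tfrac{1}{2}\sum_{n,d} V_{nd} Y_{nd}, \qquad
    B_{\mathrm{het}} = -\tfrac{1}{2} D \Big(\sum_n \beta_n \psi_0^{(n)} - \operatorname{tr}(\mathbf{A})\Big)

    A_{\mathrm{hom}} = -\tfrac{1}{2} N D \big(\log(2\pi) - \log\beta\big) - \tfrac{1}{2}\,\beta\,\operatorname{tr}(\mathbf{Y}\mathbf{Y}^{\top})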
@@ -246,7 +246,7 @@ def _raw_predict(self, Xnew, X_variance_new=None, which_parts='all', full_cov=Fa
Kmmi_LmiBLmi = backsub_both_sides(self.Lm, np.eye(self.num_inducing) - Bi)

if self.Cpsi1V is None:
psi1V = np.dot(self.psi1.T,self.likelihood.V)
psi1V = np.dot(self.psi1.T, self.likelihood.V)
tmp, _ = dtrtrs(self.Lm, np.asfortranarray(psi1V), lower=1, trans=0)
tmp, _ = dpotrs(self.LB, tmp, lower=1)
self.Cpsi1V, _ = dtrtrs(self.Lm, tmp, lower=1, trans=1)
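A hedged dense-algebra sketch of the quantity cached above (the LAPACK calls dtrtrs/dpotrs are just triangular and Cholesky solves; argument names are illustrative):

import numpy as np
from scipy.linalg import solve_triangular, cho_solve

def cached_Cpsi1V(Lm, LB, psi1, V):
    # Sketch only: Lm = chol(Kmm), LB = chol(B), shapes as in the class.
    psi1V = np.dot(psi1.T, V)                                  # (M, D)
    tmp = solve_triangular(Lm, psi1V, lower=True)              # Lm^{-1} psi1^T V
    tmp = cho_solve((LB, True), tmp)                           # B^{-1} Lm^{-1} psi1^T V
    return solve_triangular(Lm, tmp, lower=True, trans='T')    # Lm^{-T} B^{-1} Lm^{-1} psi1^T V

The result does not depend on Xnew, which is presumably why it is cached on the first predictive call.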
51 changes: 27 additions & 24 deletions GPy/examples/dimensionality_reduction.py
@@ -7,26 +7,29 @@
import GPy
from GPy.core.transformations import logexp
from GPy.models.bayesian_gplvm import BayesianGPLVM
from GPy.likelihoods.gaussian import Gaussian

default_seed = np.random.seed(123344)

def BGPLVM(seed=default_seed):
N = 10
num_inducing = 3
Q = 2
D = 4
Q = 5
D = 10
# generate GPLVM-like data
X = np.random.rand(N, Q)
k = GPy.kern.rbf(Q) + GPy.kern.white(Q, 0.00001)
lengthscales = np.random.rand(Q)
k = GPy.kern.rbf(Q, .5, lengthscales, ARD=True) + GPy.kern.white(Q, 0.01)
K = k.K(X)
Y = np.random.multivariate_normal(np.zeros(N), K, Q).T
lik = Gaussian(Y, normalize=True)

k = GPy.kern.rbf(Q, ARD=True) + GPy.kern.linear(Q, ARD=True) + GPy.kern.rbf(Q, ARD=True) + GPy.kern.white(Q)
# k = GPy.kern.rbf(Q) + GPy.kern.rbf(Q) + GPy.kern.white(Q)
k = GPy.kern.rbf_inv(Q, ARD=True) + GPy.kern.bias(Q) + GPy.kern.white(Q)
# k = GPy.kern.rbf(Q) + GPy.kern.bias(Q) + GPy.kern.white(Q, 0.00001)
# k = GPy.kern.rbf(Q, ARD = False) + GPy.kern.white(Q, 0.00001)

m = GPy.models.BayesianGPLVM(Y, Q, kernel=k, num_inducing=num_inducing)
m = GPy.models.BayesianGPLVM(lik, Q, kernel=k, num_inducing=num_inducing)
m.lengthscales = lengthscales
# m.constrain_positive('(rbf|bias|noise|white|S)')
# m.constrain_fixed('S', 1)

@@ -37,8 +40,8 @@ def BGPLVM(seed=default_seed):
# m.optimize(messages = 1)
# m.plot()
# pb.title('After optimisation')
m.randomize()
m.checkgrad(verbose=1)
# m.randomize()
# m.checkgrad(verbose=1)

return m

@@ -70,16 +73,16 @@ def sparseGPLVM_oil(optimize=True, N=100, Q=6, num_inducing=15, max_iters=50):

# create simple GP model
kernel = GPy.kern.rbf(Q, ARD=True) + GPy.kern.bias(Q)
m = GPy.models.SparseGPLVM(Y, Q, kernel=kernel, num_inducing = num_inducing)
m = GPy.models.SparseGPLVM(Y, Q, kernel=kernel, num_inducing=num_inducing)
m.data_labels = data['Y'].argmax(axis=1)

# optimize
if optimize:
m.optimize('scg', messages=1, max_iters = max_iters)
m.optimize('scg', messages=1, max_iters=max_iters)

# plot
print(m)
#m.plot_latent(labels=m.data_labels)
# m.plot_latent(labels=m.data_labels)
return m

def swiss_roll(optimize=True, N=1000, num_inducing=15, Q=4, sigma=.2, plot=False):
@@ -136,12 +139,13 @@ def swiss_roll(optimize=True, N=1000, num_inducing=15, Q=4, sigma=.2, plot=False
m.optimize('scg', messages=1)
return m

def BGPLVM_oil(optimize=True, N=200, Q=10, num_inducing=15, max_iters=50, plot=False, **k):
def BGPLVM_oil(optimize=True, N=200, Q=10, num_inducing=15, max_iters=150, plot=False, **k):
np.random.seed(0)
data = GPy.util.datasets.oil()

# create simple GP model
kernel = GPy.kern.rbf(Q, ARD=True) + GPy.kern.bias(Q, np.exp(-2)) + GPy.kern.white(Q, np.exp(-2))
kernel = GPy.kern.rbf_inv(Q, ARD=True) + GPy.kern.bias(Q, np.exp(-2)) + GPy.kern.white(Q, np.exp(-2))
kernel += GPy.kern.rbf_inv(Q, ARD=True) + GPy.kern.bias(Q, np.exp(-2)) + GPy.kern.white(Q, np.exp(-2))
Y = data['X'][:N]
Yn = Y - Y.mean(0)
Yn /= Yn.std(0)
@@ -150,14 +154,14 @@ def BGPLVM_oil(optimize=True, N=200, Q=10, num_inducing=15, max_iters=50, plot=F
m.data_labels = data['Y'][:N].argmax(axis=1)

# m.constrain('variance|leng', logexp_clipped())
m['.*lengt'] = 1. # m.X.var(0).max() / m.X.var(0)
# m['.*lengt'] = m.X.var(0).max() / m.X.var(0)
m['noise'] = Yn.var() / 100.


# optimize
if optimize:
m.constrain_fixed('noise')
m.optimize('scg', messages=1, max_iters=100, gtol=.05)
m.optimize('scg', messages=1, max_iters=200, gtol=.05)
m.constrain_positive('noise')
m.optimize('scg', messages=1, max_iters=max_iters, gtol=.05)

@@ -208,7 +212,7 @@ def _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim=False):

Y1 += .3 * np.random.randn(*Y1.shape)
Y2 += .2 * np.random.randn(*Y2.shape)
Y3 += .1 * np.random.randn(*Y3.shape)
Y3 += .25 * np.random.randn(*Y3.shape)

Y1 -= Y1.mean(0)
Y2 -= Y2.mean(0)
@@ -263,24 +267,23 @@ def bgplvm_simulation_matlab_compare():

def bgplvm_simulation(optimize='scg',
plot=True,
max_iters=2e4):
max_iters=2e4,
plot_sim=False):
# from GPy.core.transformations import logexp_clipped
D1, D2, D3, N, num_inducing, Q = 15, 8, 8, 100, 3, 5
slist, Slist, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot)
D1, D2, D3, N, num_inducing, Q = 15, 5, 8, 300, 30, 6
slist, Slist, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim)

from GPy.models import mrd
from GPy import kern
reload(mrd); reload(kern)


Y = Ylist[0]

k = kern.linear(Q, ARD=True) + kern.bias(Q, np.exp(-2)) + kern.white(Q, np.exp(-2)) # + kern.bias(Q)
m = BayesianGPLVM(Y, Q, init="PCA", num_inducing=num_inducing, kernel=k)

# m.constrain('variance|noise', logexp_clipped())
m['noise'] = Y.var() / 100.
m['linear_variance'] = .01

if optimize:
print "Optimizing model:"
@@ -346,7 +349,7 @@ def brendan_faces():
def stick_play(range=None, frame_rate=15):
data = GPy.util.datasets.stick()
# optimize
if range==None:
if range == None:
Y = data['Y'].copy()
else:
Y = data['Y'][range[0]:range[1], :].copy()
@@ -374,10 +377,10 @@ def stick_bgplvm(model=None):
data = GPy.util.datasets.stick()
Q = 6
kernel = GPy.kern.rbf(Q, ARD=True) + GPy.kern.bias(Q, np.exp(-2)) + GPy.kern.white(Q, np.exp(-2))
m = BayesianGPLVM(data['Y'], Q, init="PCA", num_inducing=20,kernel=kernel)
m = BayesianGPLVM(data['Y'], Q, init="PCA", num_inducing=20, kernel=kernel)
# optimize
m.ensure_default_constraints()
m.optimize(messages=1, max_f_eval=3000,xtol=1e-300,ftol=1e-300)
m.optimize(messages=1, max_f_eval=3000, xtol=1e-300, ftol=1e-300)
m._set_params(m._get_params())
plt.clf, (latent_axes, sense_axes) = plt.subplots(1, 2)
plt.sca(latent_axes)
49 changes: 43 additions & 6 deletions GPy/kern/kern.py
@@ -66,12 +66,26 @@ def setstate(self, state):
Parameterized.setstate(self, state)


def plot_ARD(self, fignum=None, ax=None, title=None):
"""If an ARD kernel is present, it bar-plots the ARD parameters"""
def plot_ARD(self, fignum=None, ax=None, title='', legend=False):
"""If an ARD kernel is present, it bar-plots the ARD parameters,
:param fignum: figure number of the plot
:param ax: matplotlib axis to plot on
:param title:
title of the plot,
pass '' to not print a title
pass None for a generic title
"""
if ax is None:
fig = pb.figure(fignum)
ax = fig.add_subplot(111)
from GPy.util import Tango
from matplotlib.textpath import TextPath
Tango.reset()
xticklabels = []
bars = []
x0 = 0
for p in self.parts:
c = Tango.nextMedium()
if hasattr(p, 'ARD') and p.ARD:
if title is None:
ax.set_title('ARD parameters, %s kernel' % p.name)
@@ -82,10 +96,32 @@ def plot_ARD(self, fignum=None, ax=None, title=None):
else:
ard_params = 1. / p.lengthscale

x = np.arange(len(ard_params))
ax.bar(x - 0.4, ard_params)
ax.set_xticks(x)
ax.set_xticklabels([r"${}$".format(i) for i in x])
x = np.arange(x0, x0 + len(ard_params))
bars.append(ax.bar(x, ard_params, align='center', color=c, edgecolor='k', linewidth=1.2, label=p.name))
xticklabels.extend([r"$\mathrm{{{name}}}\ {x}$".format(name=p.name, x=i) for i in np.arange(len(ard_params))])
x0 += len(ard_params)
x = np.arange(x0)
for bar in bars:
for patch, num in zip(bar.patches, np.arange(len(bar.patches))):
height = patch.get_height()
xi = patch.get_x() + patch.get_width() / 2.
va = 'top'
c = 'w'
t = TextPath((0, 0), "${xi}$".format(xi=xi), rotation=0, usetex=True, ha='center')
if patch.get_extents().height <= t.get_extents().height + 2:
va = 'bottom'
c = 'k'
ax.text(xi, height, "${xi}$".format(xi=int(num)), color=c, rotation=0, ha='center', va=va)
# for xi, t in zip(x, xticklabels):
# ax.text(xi, maxi / 2, t, rotation=90, ha='center', va='center')
# ax.set_xticklabels(xticklabels, rotation=17)
ax.set_xticks([])
ax.set_xlim(-.5, x0 - .5)
if title == '':
ax.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=len(bars), mode="expand", borderaxespad=0.)
else:
ax.legend()
return ax
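A hedged usage sketch for the reworked plot (the data, kernel mix, and model choice here are illustrative and follow the 0.x-era GPy API used throughout this diff; only plot_ARD itself comes from this change):

import numpy as np
import GPy

X = np.random.rand(50, 3)
Y = np.sin(X[:, :1]) + 0.05 * np.random.randn(50, 1)

# Two ARD parts plus noise: each part gets its own colour and its own group of bars.
kern = GPy.kern.rbf(3, ARD=True) + GPy.kern.linear(3, ARD=True) + GPy.kern.white(3)
m = GPy.models.GPRegression(X, Y, kernel=kern)
m.ensure_default_constraints()
m.optimize()

ax = m.kern.plot_ARD()            # default title='' suppresses the title and shows the expanded legend
ax = m.kern.plot_ARD(title=None)  # None requests the generic per-part title instead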

def _transform_gradients(self, g):
@@ -361,6 +397,7 @@ def dpsi2_dtheta(self, dL_dpsi2, Z, mu, S):

# compute the "cross" terms
# TODO: better looping, input_slices

for i1, i2 in itertools.permutations(range(len(self.parts)), 2):
p1, p2 = self.parts[i1], self.parts[i2]
# ipsl1, ipsl2 = self.input_slices[i1], self.input_slices[i2]
4 changes: 2 additions & 2 deletions GPy/models/bayesian_gplvm.py
@@ -115,8 +115,8 @@ def _log_likelihood_gradients(self):
self.dbound_dZtheta = SparseGP._log_likelihood_gradients(self)
return np.hstack((self.dbound_dmuS.flatten(), self.dbound_dZtheta))

def plot_latent(self, *args, **kwargs):
return plot_latent.plot_latent(self, *args, **kwargs)
def plot_latent(self, plot_inducing=True, *args, **kwargs):
return plot_latent.plot_latent(self, plot_inducing=plot_inducing, *args, **kwargs)

def do_test_latents(self, Y):
"""
6 changes: 3 additions & 3 deletions GPy/models/gplvm.py
@@ -36,10 +36,10 @@ def __init__(self, Y, input_dim, init='PCA', X=None, kernel=None, normalize_Y=Fa
self.ensure_default_constraints()

def initialise_latent(self, init, input_dim, Y):
Xr = np.random.randn(Y.shape[0], input_dim)
if init == 'PCA':
return PCA(Y, input_dim)[0]
else:
return np.random.randn(Y.shape[0], input_dim)
Xr[:, :Y.shape[1]] = PCA(Y, input_dim)[0]
return Xr
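The rewritten initialiser starts from a random matrix and overwrites its leading columns with the PCA projection, so a latent dimensionality larger than the data dimensionality still gets a full (randomly padded) initialisation. A hedged plain-numpy sketch of the same idea (not GPy's PCA utility):

import numpy as np

def pca_init(Y, input_dim):
    # Random init everywhere, then fill as many leading columns as PCA can provide.
    Xr = np.random.randn(Y.shape[0], input_dim)
    Yc = Y - Y.mean(0)
    U, S, Vt = np.linalg.svd(Yc, full_matrices=False)
    k = min(input_dim, Y.shape[1])
    Xr[:, :k] = U[:, :k] * S[:k]   # principal-component scores
    return Xr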

def getstate(self):
return GP.getstate(self)
27 changes: 25 additions & 2 deletions GPy/testing/unit_tests.py
@@ -23,7 +23,7 @@ def setUp(self):
self.X2D = np.random.uniform(-3., 3., (40, 2))
self.Y2D = np.sin(self.X2D[:, 0:1]) * np.sin(self.X2D[:, 1:2]) + np.random.randn(40, 1) * 0.05

def check_model_with_white(self, kern, model_type='GPRegression', dimension=1):
def check_model_with_white(self, kern, model_type='GPRegression', dimension=1, uncertain_inputs=False):
# Get the correct gradients
if dimension == 1:
X = self.X1D
@@ -36,7 +36,10 @@ def check_model_with_white(self, kern, model_type='GPRegression', dimension=1):

noise = GPy.kern.white(dimension)
kern = kern + noise
m = model_fit(X, Y, kernel=kern)
if uncertain_inputs:
m = model_fit(X, Y, kernel=kern, X_variance=np.random.rand(X.shape[0], X.shape[1]))
else:
m = model_fit(X, Y, kernel=kern)
m.randomize()
# constrain all parameters to be positive
self.assertTrue(m.checkgrad())
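A hedged sketch of what the new uncertain_inputs path exercises (per the call above, SparseGPRegression in this code base accepts an X_variance array of the same shape as X; the data and kernel here are illustrative):

import numpy as np
import GPy

X = np.random.uniform(-3., 3., (40, 2))
Y = np.sin(X[:, 0:1]) * np.sin(X[:, 1:2]) + 0.05 * np.random.randn(40, 1)

kern = GPy.kern.rbf(2) + GPy.kern.linear(2) + GPy.kern.white(2)
m = GPy.models.SparseGPRegression(X, Y, kernel=kern,
                                  X_variance=np.random.rand(*X.shape))
m.ensure_default_constraints()
m.randomize()
print(m.checkgrad(verbose=1))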
@@ -141,6 +144,26 @@ def test_SparseGPRegression_rbf_white_kern_2D(self):
rbf = GPy.kern.rbf(2)
self.check_model_with_white(rbf, model_type='SparseGPRegression', dimension=2)

def test_SparseGPRegression_rbf_linear_white_kern_1D(self):
''' Testing the sparse GP regression with rbf, linear and white kernel on 1d data '''
rbflin = GPy.kern.rbf(1) + GPy.kern.linear(1)
self.check_model_with_white(rbflin, model_type='SparseGPRegression', dimension=1)

def test_SparseGPRegression_rbf_linear_white_kern_2D(self):
''' Testing the sparse GP regression with rbf, linear and white kernel on 2d data '''
rbflin = GPy.kern.rbf(2) + GPy.kern.linear(2)
self.check_model_with_white(rbflin, model_type='SparseGPRegression', dimension=2)

def test_SparseGPRegression_rbf_linear_white_kern_2D_uncertain_inputs(self):
''' Testing the sparse GP regression with rbf, linear and white kernel on 2d data with uncertain inputs'''
rbflin = GPy.kern.rbf(2) + GPy.kern.linear(2)
self.check_model_with_white(rbflin, model_type='SparseGPRegression', dimension=2, uncertain_inputs=1)

def test_SparseGPRegression_rbf_linear_white_kern_1D_uncertain_inputs(self):
''' Testing the sparse GP regression with rbf, linear and white kernel on 1d data with uncertain inputs'''
rbflin = GPy.kern.rbf(1) + GPy.kern.linear(1)
self.check_model_with_white(rbflin, model_type='SparseGPRegression', dimension=1, uncertain_inputs=1)

def test_GPLVM_rbf_bias_white_kern_2D(self):
""" Testing GPLVM with rbf + bias and white kernel """
N, input_dim, D = 50, 1, 2
