Added a flag that allows users to turn off multiprocessing if it causes problems
JasperSnoek committed May 13, 2014
1 parent ff2d757 commit 2de5cd4
Showing 1 changed file with 24 additions and 20 deletions.
44 changes: 24 additions & 20 deletions spearmint/spearmint/chooser/GPEIOptChooser.py
@@ -56,7 +56,7 @@ class GPEIOptChooser:
 
     def __init__(self, expt_dir, covar="Matern52", mcmc_iters=10,
                  pending_samples=100, noiseless=False, burnin=100,
-                 grid_subset=20):
+                 grid_subset=20, use_multiprocessing=True):
         self.cov_func = getattr(gp, covar)
         self.locker = Locker()
         self.state_pkl = os.path.join(expt_dir, self.__module__ + ".pkl")
@@ -67,7 +67,7 @@ def __init__(self, expt_dir, covar="Matern52", mcmc_iters=10,
         self.needs_burnin = True
         self.pending_samples = int(pending_samples)
         self.D = -1
-        self.hyper_iters = 1
+        self.hyper_iters = 1
         # Number of points to optimize EI over
         self.grid_subset = int(grid_subset)
         self.noiseless = bool(int(noiseless))
@@ -77,6 +77,10 @@ def __init__(self, expt_dir, covar="Matern52", mcmc_iters=10,
         self.amp2_scale = 1 # zero-mean log normal prior
         self.max_ls = 2 # top-hat prior on length scales
 
+        # If multiprocessing fails or deadlocks, set this to False
+        self.use_multiprocessing = bool(int(use_multiprocessing))
+
+
     def dump_hypers(self):
         self.locker.lock_wait(self.state_pkl)

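Because the new argument is parsed with bool(int(use_multiprocessing)), it can be supplied as 0/1 as well as True/False. Below is a minimal sketch of constructing the chooser with multiprocessing disabled; the experiment directory and the exact import path are placeholders, not part of this commit:

# Sketch only: disable the multiprocessing pool at construction time.
# Adjust the import to match your install layout.
from spearmint.chooser.GPEIOptChooser import GPEIOptChooser

chooser = GPEIOptChooser("my_experiment_dir", use_multiprocessing=0)
assert chooser.use_multiprocessing is False  # bool(int(0)) -> False

In practice the same value would normally arrive through whatever chooser-argument string the driver script passes along, e.g. use_multiprocessing=0.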
@@ -266,25 +270,25 @@ def next(self, grid, values, durations,
             inds = np.argsort(np.mean(overall_ei,axis=1))[-self.grid_subset:]
             cand2 = cand2[inds,:]
 
-            # This is old code to optimize each point in parallel. Uncomment
-            # and replace if multiprocessing doesn't work
-            #for i in xrange(0, cand2.shape[0]):
-            #    log("Optimizing candidate %d/%d" %
-            #        (i+1, cand2.shape[0]))
-            #self.check_grad_ei(cand2[i,:].flatten(), comp, pend, vals)
-            #    ret = spo.fmin_l_bfgs_b(self.grad_optimize_ei_over_hypers,
-            #                            cand2[i,:].flatten(), args=(comp,pend,vals),
-            #                            bounds=b, disp=0)
-            #    cand2[i,:] = ret[0]
-            #cand = np.vstack((cand, cand2))
 
-            # Optimize each point in parallel
-            pool = multiprocessing.Pool(self.grid_subset)
-            results = [pool.apply_async(optimize_pt,args=(
-                        c,b,comp,pend,vals,copy.copy(self))) for c in cand2]
-            for res in results:
-                cand = np.vstack((cand, res.get(1e8)))
-            pool.close()
+            if self.use_multiprocessing:
+                # Optimize each point in parallel
+                pool = multiprocessing.Pool(self.grid_subset)
+                results = [pool.apply_async(optimize_pt,args=(
+                            c,b,comp,pend,vals,copy.copy(self))) for c in cand2]
+                for res in results:
+                    cand = np.vstack((cand, res.get(1e8)))
+                pool.close()
+            else:
+                # This is old code to optimize each point in parallel.
+                for i in xrange(0, cand2.shape[0]):
+                    log("Optimizing candidate %d/%d" %
+                        (i+1, cand2.shape[0]))
+                    #self.check_grad_ei(cand2[i,:].flatten(), comp, pend, vals)
+                    ret = spo.fmin_l_bfgs_b(self.grad_optimize_ei_over_hypers,
+                                            cand2[i,:].flatten(), args=(comp,pend,vals),
+                                            bounds=b, disp=0)
+                    cand2[i,:] = ret[0]
+                cand = np.vstack((cand, cand2))
 
             overall_ei = self.ei_over_hypers(comp,pend,cand,vals)
             best_cand = np.argmax(np.mean(overall_ei, axis=1))
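The serial branch exists because multiprocessing.Pool can hang or fail in some environments (for example inside daemonized worker processes or with certain forked BLAS builds), which is the kind of problem the commit message alludes to. A rough, self-contained sketch of the same dispatch pattern follows, using a stand-in objective in place of optimize_pt and the chooser state; all names below are illustrative rather than spearmint's API:

# Illustrative sketch (not spearmint code): run per-candidate optimizations through
# a process pool when allowed, otherwise fall back to a plain serial loop.
import multiprocessing
import numpy as np

def optimize_candidate(x):
    # Stand-in for the real per-candidate EI optimization (optimize_pt / fmin_l_bfgs_b).
    return x * 0.5

def optimize_all(candidates, use_multiprocessing=True, workers=4):
    if use_multiprocessing:
        pool = multiprocessing.Pool(workers)
        handles = [pool.apply_async(optimize_candidate, args=(c,)) for c in candidates]
        results = [h.get(timeout=1e8) for h in handles]  # generous timeout, as in the diff
        pool.close()
        pool.join()
    else:
        # Serial fallback: slower, but sidesteps Pool-related deadlocks entirely.
        results = [optimize_candidate(c) for c in candidates]
    return np.vstack(results)

if __name__ == "__main__":
    cands = [np.array([1.0, 2.0]), np.array([3.0, 4.0])]
    print(optimize_all(cands, use_multiprocessing=False))

Passing copy.copy(self) to each worker, as the real code does, presumably ensures every task pickles its own copy of the chooser state rather than mutating a shared object.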
