
Commit

update
chriscremer committed Feb 2, 2018
1 parent 0f01e95 commit 7e4000a
Showing 5 changed files with 1,019 additions and 33 deletions.
117 changes: 93 additions & 24 deletions Inference_Suboptimality/gaps_over_training_exp/compute_gaps.py
@@ -52,9 +52,15 @@

import csv

# print (sys.argv)



gpu_to_use = sys.argv[1]
epoch = sys.argv[2]
os.environ['CUDA_VISIBLE_DEVICES'] = gpu_to_use #'1'
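# Presumed usage (not shown in this commit): the script is launched once per checkpoint,
# e.g.  python compute_gaps.py 0 3280   -- GPU id first, checkpoint epoch second.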





@@ -127,18 +133,44 @@ def test(model, data_x, batch_size, display, k):
# train_x = np.concatenate([train_x, valid_x], axis=0)
# print (train_x.shape)

#Load data
print ('Loading data' )
data_location = home + '/Documents/MNIST_data/'
# #Load data
# print ('Loading data' )
# data_location = home + '/Documents/MNIST_data/'
# # with open(data_location + 'binarized_mnist.pkl', 'rb') as f:
# # train_x, valid_x, test_x = pickle.load(f)
# with open(data_location + 'binarized_mnist.pkl', 'rb') as f:
# train_x, valid_x, test_x = pickle.load(f)
with open(data_location + 'binarized_mnist.pkl', 'rb') as f:
    train_x, valid_x, test_x = pickle.load(f, encoding='latin1')
print ('Train', train_x.shape)
print ('Valid', valid_x.shape)
print ('Test', test_x.shape)
# train_x, valid_x, test_x = pickle.load(f, encoding='latin1')
# print ('Train', train_x.shape)
# print ('Valid', valid_x.shape)
# print ('Test', test_x.shape)




# FASHION-MNIST
def load_mnist(path, kind='train'):
    # Read the gzipped IDX image file for the given split ('train' or 't10k').
    images_path = os.path.join(path,
                               '%s-images-idx3-ubyte.gz'
                               % kind)

    with gzip.open(images_path, 'rb') as imgpath:
        # Skip the 16-byte IDX header and flatten each 28x28 image to 784 values.
        images = np.frombuffer(imgpath.read(), dtype=np.uint8,
                               offset=16).reshape(-1, 784)

    return images  #, labels


path = home+'/Documents/fashion_MNIST'

train_x = load_mnist(path=path)
test_x = load_mnist(path=path, kind='t10k')

train_x = train_x / 255.
test_x = test_x / 255.

print (train_x.shape)
print (test_x.shape)
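# The loader above keeps only images (note the '#, labels' left in its return).
# If labels were ever needed, a companion reader for the gzipped IDX label files
# would look roughly like the sketch below; load_mnist_labels is a hypothetical
# helper, not something this commit defines, and it reuses the os/gzip/np calls
# already relied on by load_mnist.
def load_mnist_labels(path, kind='train'):
    labels_path = os.path.join(path, '%s-labels-idx1-ubyte.gz' % kind)
    with gzip.open(labels_path, 'rb') as lbpath:
        # IDX label files carry an 8-byte header (magic + count), versus 16 bytes for images.
        return np.frombuffer(lbpath.read(), dtype=np.uint8, offset=8)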



@@ -163,6 +195,8 @@ def test(model, data_x, batch_size, display, k):
# print('\nModel:', hyper_config,'\n')




x_size = 784
z_size = 50
# batch_size = 20
@@ -204,7 +238,7 @@ def test(model, data_x, batch_size, display, k):
'encoder_arch': [[x_size,200],[200,200],[200,z_size*2]],
'decoder_arch': [[z_size,200],[200,200],[200,x_size]],
'q_dist': standard, #FFG_LN#,#hnf,#aux_nf,#flow1,#,
'cuda': 1
'cuda': gpu_to_use
}
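# The last encoder layer outputs z_size*2 = 100 units: in the usual VAE setup this
# repo follows, these are split into the mean and log-variance of a fully-factorized
# Gaussian q(z|x), which is presumably what 'q_dist': standard selects (as opposed to
# the flow-based variants named in the comment above).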


@@ -229,7 +263,7 @@ def test(model, data_x, batch_size, display, k):


# Which gpu
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
os.environ['CUDA_VISIBLE_DEVICES'] = gpu_to_use

print ('Init model')
model = VAE(hyper_config)
@@ -247,11 +281,12 @@ def test(model, data_x, batch_size, display, k):
print (model.generator)


epoch = '3280'
# epoch = '1600' # '2200' # '1300' # '100'# '3280'


print ('Load params for decoder')
path_to_load_variables=home+'/Documents/tmp/inference_suboptimality/vae_generator_'+epoch+'.pt'
path_to_load_variables=home+'/Documents/tmp/inference_suboptimality/fashion_params/reg_generator_'+epoch+'.pt'
# path_to_load_variables=home+'/Documents/tmp/inference_suboptimality/vae_generator_'+epoch+'.pt'
# path_to_load_variables=home+'/Documents/tmp/inference_suboptimality/vae_generator_3280.pt'
# path_to_load_variables=home+'/Documents/tmp/inference_suboptimality/decoder_exps/hidden_layers_4_generator_3280.pt'
# path_to_load_variables=home+'/Documents/tmp/inference_suboptimality/decoder_exps/hidden_layers_2_generator_3280.pt'
@@ -279,7 +314,7 @@ def test(model, data_x, batch_size, display, k):

print ('Load params for encoder')
# path_to_load_variables=home+'/Documents/tmp/inference_suboptimality/vae_encoder_100.pt'
path_to_load_variables=home+'/Documents/tmp/inference_suboptimality/vae_encoder_'+epoch+'.pt'
path_to_load_variables=home+'/Documents/tmp/inference_suboptimality/fashion_params/reg_encoder_'+epoch+'.pt'
# path_to_load_variables=home+'/Documents/tmp/inference_suboptimality/vae_smallencoder_encoder_3280.pt'
# path_to_load_variables=home+'/Documents/tmp/inference_suboptimality/vae_regencoder_encoder_3280.pt'
# path_to_load_variables=home+'/Documents/tmp/inference_suboptimality/vae_smallencoder_withflow_encoder_3280.pt'
@@ -300,6 +335,7 @@ def test(model, data_x, batch_size, display, k):




###########################
# For each datapoint, compute L[q], L[q*], log p(x)
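# These three numbers per datapoint are what the inference-suboptimality decomposition
# needs: the approximation gap is log p(x) - L[q*] (how far the best member of the
# variational family is from the true posterior), and the amortization gap is
# L[q*] - L[q] (what is lost by using the amortized encoder instead of optimizing the
# variational parameters per datapoint). Here log p(x) is itself estimated with a
# large-k IWAE bound (k=5000 below), so both gaps are estimates rather than exact values.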

@@ -309,7 +345,12 @@ def test(model, data_x, batch_size, display, k):

# start_time = time.time()

n_data = 1 # 100 #1000 #100
n_data = 100 #10 # 2 # #3 # 100 #1000 #100


#to save results
file_ = home+'/Documents/tmp/inference_suboptimality/over_training_exps/results_'+str(n_data)+'_fashion.txt'


vaes = []
iwaes = []
@@ -362,13 +403,27 @@ def test(model, data_x, batch_size, display, k):
# print ('opt iwae flex',np.mean(iwaes_flex))
# print()


with open(file_, 'a') as f:
    writer = csv.writer(f, delimiter=' ')

    writer.writerow(['training', epoch, 'L_q_star', np.mean(vaes)])
    writer.writerow(['training', epoch, 'logpx', np.mean(iwaes)])
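# The results file is opened in append mode, presumably so that successive runs of this
# script (one per checkpoint epoch) accumulate one row per quantity and the gap curves
# over training can be assembled afterwards.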


if compute_amort:
    # Amortized bounds: evaluate the trained encoder as-is (no per-datapoint optimization).
    VAE_train = test_vae(model=model, data_x=train_x[:n_data], batch_size=np.minimum(n_data, 50), display=10, k=5000)
    IW_train = test(model=model, data_x=train_x[:n_data], batch_size=np.minimum(n_data, 50), display=10, k=5000)
    print ('amortized VAE',VAE_train)
    print ('amortized IW',IW_train)


    with open(file_, 'a') as f:
        writer = csv.writer(f, delimiter=' ')

        writer.writerow(['training', epoch, 'L_q', VAE_train])


# print()
# AIS_train = test_ais(model=model, data_x=train_x[:n_data], batch_size=n_data, display=2, k=50, n_intermediate_dists=500)
# print ('AIS_train',AIS_train)
@@ -442,6 +497,13 @@ def test(model, data_x, batch_size, display, k):
print ('opt iwae',np.mean(iwaes_test))
print()

with open(file_, 'a') as f:
    writer = csv.writer(f, delimiter=' ')

    writer.writerow(['validation', epoch, 'L_q_star', np.mean(vaes_test)])
    writer.writerow(['validation', epoch, 'logpx', np.mean(iwaes_test)])


# print ('opt vae flex',np.mean(vaes_flex))
# print ('opt iwae flex',np.mean(iwaes_flex))
# print()
@@ -453,6 +515,12 @@ def test(model, data_x, batch_size, display, k):
print ('amortized IW',IW_test)


with open(file_, 'a') as f:
    writer = csv.writer(f, delimiter=' ')

    writer.writerow(['validation', epoch, 'L_q', VAE_test])
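# A minimal sketch (hypothetical, not part of this commit) of how the space-delimited
# rows written above could be read back to recover the two gaps per (split, epoch);
# read_gaps and results_file are assumed names, and the row layout is the one produced
# by the csv.writer calls above: split, epoch, key, value.
def read_gaps(results_file):
    vals = {}
    with open(results_file) as f:
        for row in csv.reader(f, delimiter=' '):
            split, ep, key, value = row[0], row[1], row[2], float(row[3])
            vals[(split, ep, key)] = value
    gaps = {}
    for (split, ep, key) in list(vals.keys()):
        if key != 'L_q':
            continue
        q = vals[(split, ep, 'L_q')]
        q_star = vals.get((split, ep, 'L_q_star'))
        logpx = vals.get((split, ep, 'logpx'))
        if q_star is not None and logpx is not None:
            # Amortization gap: L[q*] - L[q]; approximation gap: log p(x) - L[q*].
            gaps[(split, ep)] = {'amortization': q_star - q,
                                 'approximation': logpx - q_star}
    return gaps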




print('TRAIN')
@@ -473,19 +541,20 @@ def test(model, data_x, batch_size, display, k):


#write to file
file_ = home+'/Documents/tmp/inference_suboptimality/over_training_exps/results2.txt'
# file_ = home+'/Documents/tmp/inference_suboptimality/over_training_exps/results_50.txt'
# file_ = home+'/Documents/tmp/inference_suboptimality/over_training_exps/results_'+str(n_data)+'_fashion.txt'


with open(file_, 'a') as f:
    reader = csv.reader(f, delimiter=' ')
# with open(file_, 'a') as f:
# writer = csv.writer(f, delimiter=' ')

    reader.writerow(['training', epoch, 'L_q', VAE_train])
    reader.writerow(['training', epoch, 'L_q_star', np.mean(vaes)])
    reader.writerow(['training', epoch, 'logpx', np.mean(iwaes)])
# writer.writerow(['training', epoch, 'L_q', VAE_train])
# writer.writerow(['training', epoch, 'L_q_star', np.mean(vaes)])
# writer.writerow(['training', epoch, 'logpx', np.mean(iwaes)])

    reader.writerow(['validation', epoch, 'L_q', VAE_test])
    reader.writerow(['validation', epoch, 'L_q_star', np.mean(vaes_test)])
    reader.writerow(['validation', epoch, 'logpx', np.mean(iwaes_test)])
# writer.writerow(['validation', epoch, 'L_q', VAE_test])
# writer.writerow(['validation', epoch, 'L_q_star', np.mean(vaes_test)])
# writer.writerow(['validation', epoch, 'logpx', np.mean(iwaes_test)])



