Merge pull request Unity-Technologies#16 from ASPePeX/keep-checkpoints
--keep-checkpoints=<n> option for ppo.py
cesar romero authored Sep 21, 2017
2 parents e65a83b + 0b3b667 commit b67f96f
Showing 1 changed file with 3 additions and 1 deletion.
python/ppo.py: 3 additions & 1 deletion

@@ -31,6 +31,7 @@
 --learning-rate=<rate>      Model learning rate [default: 3e-4].
 --hidden-units=<n>          Number of units in hidden layer [default: 64].
 --batch-size=<n>            How many experiences per gradient descent update step [default: 64].
+--keep-checkpoints=<n>      How many model checkpoints to keep [default: 5].
 '''

 options = docopt(_USAGE)
@@ -45,6 +46,7 @@
 summary_freq = int(options['--summary-freq'])
 save_freq = int(options['--save-freq'])
 env_name = options['<env>']
+keep_checkpoints = int(options['--keep-checkpoints'])

 # Algorithm-specific parameters for tuning
 gamma = float(options['--gamma'])
@@ -79,7 +81,7 @@
 os.makedirs(summary_path)

 init = tf.global_variables_initializer()
-saver = tf.train.Saver()
+saver = tf.train.Saver(max_to_keep=keep_checkpoints)

 with tf.Session() as sess:
     # Instantiate model parameters
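For context, the new flag works through TensorFlow's tf.train.Saver(max_to_keep=...) argument, which bounds how many recent checkpoint files the saver retains on disk. A minimal sketch of that behavior (TF1-style API; the variable, paths, and step count are hypothetical, not from this commit):

    import os
    import tensorflow as tf

    # One dummy variable so the Saver has something to checkpoint.
    x = tf.Variable(0.0, name="x")

    # Mirrors the value parsed from --keep-checkpoints in ppo.py.
    keep_checkpoints = 3
    saver = tf.train.Saver(max_to_keep=keep_checkpoints)

    os.makedirs("./model", exist_ok=True)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for step in range(10):
            # Each save writes a ckpt-<step> checkpoint; the Saver
            # automatically deletes the oldest files so that at most
            # keep_checkpoints of them remain on disk.
            saver.save(sess, "./model/ckpt", global_step=step)

Note that docopt defaults are strings: omitting the flag yields options['--keep-checkpoints'] == '5', which the int() call in ppo.py converts before the value reaches the Saver.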
