# dynamic_programming.py
# Forked from brett-daley/gym-classics.

import argparse
import gym
import numpy as np

def value_iteration(env, discount, precision=1e-3):
    """Computes the optimal state-value function V* by sweeping the Bellman
    optimality backup until the largest per-state change is below `precision`."""
    assert 0.0 <= discount <= 1.0
    assert precision > 0.0
    V = np.zeros(env.observation_space.n, dtype=np.float64)

    while True:
        V_old = V.copy()

        for s in env.states():
            # Greedy backup: V(s) <- max_a E[r + discount * V(s')]
            Q_values = [backup(env, discount, V, s, a) for a in env.actions()]
            V[s] = max(Q_values)

        if np.abs(V - V_old).max() <= precision:
            return V


def policy_iteration(env, discount, precision=1e-3):
    """Alternates policy evaluation and policy improvement until the policy is stable."""
    assert 0.0 <= discount <= 1.0
    assert precision > 0.0
    # For the sake of determinism, we start with the policy that always chooses action 0
    policy = np.zeros(env.observation_space.n, dtype=np.int32)

    while True:
        V_policy = policy_evaluation(env, discount, policy, precision)
        policy, stable = policy_improvement(env, discount, policy, V_policy, precision)

        if stable:
            return policy


def policy_evaluation(env, discount, policy, precision=1e-3):
    """Computes the state-value function of a deterministic policy by iterating
    the Bellman expectation backup until convergence."""
    assert 0.0 <= discount <= 1.0
    assert precision > 0.0
    V = np.zeros(policy.shape, dtype=np.float64)

    while True:
        V_old = V.copy()

        for s in env.states():
            V[s] = backup(env, discount, V, s, policy[s])

        if np.abs(V - V_old).max() <= precision:
            return V

####################
# Helper functions #
####################

def policy_improvement(env, discount, policy, V_policy, precision=1e-3):
    """Greedily improves the policy with respect to V_policy (in place).
    Returns the new policy and whether it is stable."""
    policy_old = policy.copy()
    V_old = V_policy.copy()

    for s in env.states():
        Q_values = [backup(env, discount, V_policy, s, a) for a in env.actions()]
        policy[s] = np.argmax(Q_values)
        V_policy[s] = max(Q_values)

    # The policy is stable when, for every state, either the action is unchanged
    # or the value function has already converged (ties can still flip the argmax).
    stable = np.logical_or(
        policy == policy_old,
        np.abs(V_policy - V_old).max() <= precision,
    ).all()
    return policy, stable


def backup(env, discount, V, state, action):
    """Expected one-step return: E[r + discount * (1 - done) * V(s')] under the model."""
    next_states, rewards, dones, probs = env.model(state, action)
    bootstraps = (1.0 - dones) * V[next_states]
    return np.sum(probs * (rewards + discount * bootstraps))
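

if __name__ == '__main__':
    # Illustrative usage sketch, not part of the original module. It assumes that
    # importing gym_classics registers its environments with Gym (registration may
    # differ across gym-classics versions) and that an ID such as '5Walk-v0' exists;
    # adjust the ID and defaults for your install.
    import gym_classics  # noqa: F401  (imported for its env-registration side effect)

    parser = argparse.ArgumentParser()
    parser.add_argument('env_id', help="Gym environment ID, e.g. '5Walk-v0'")
    parser.add_argument('--discount', type=float, default=0.9)
    parser.add_argument('--precision', type=float, default=1e-3)
    args = parser.parse_args()

    # .unwrapped exposes the gym-classics methods (states/actions/model) used above.
    env = gym.make(args.env_id).unwrapped
    print('V*:', value_iteration(env, args.discount, args.precision))
    print('greedy policy:', policy_iteration(env, args.discount, args.precision))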