varying_policy_experiment.py
import numpy as np
from mdp_matrix import GridWorld
from double_sarsa import double_sarsa
from expected_sarsa import expected_sarsa
from double_expected_sarsa import double_expected_sarsa
from sarsa import sarsa
import matplotlib.pyplot as plt
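# Assumes the sibling modules (mdp_matrix, sarsa, expected_sarsa, double_sarsa,
# double_expected_sarsa) from this repository are on the import path.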
# TODO: change these graphs to be over alpha like in the paper
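# Reward spec: one [row, col, reward] entry per cell of the 10x10 grid.
# Every step costs -1; the two entries overridden below give a reward of +1.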
test_rewards = [[i, j, -1] for i in range(10) for j in range(10)]
test_rewards[2] = [0, 2, 1]
test_rewards[23] = [4, 3, 1]
# test_rewards = [[0, 3, 5],
#                 [0, 1, 10]]
print(test_rewards)
gw = GridWorld(10, test_rewards, terminal_states=[2, 23])
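# NOTE: terminal_states=[2, 23] presumably refers to flat state indices in
# GridWorld, matching the two reward entries overridden above.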
average_reward_double_sarsa = []
all_rewards_per_episode_double_sarsa = []
average_reward_expected_sarsa = []
all_rewards_per_episode_expected_sarsa = []
average_reward_double_expected_sarsa = []
all_rewards_per_episode_double_expected_sarsa = []
average_reward_sarsa = []
all_rewards_per_episode_sarsa = []
epsilon_values = [.3, .1, .01, .001, .0001]
n = 10000
number_of_runs = 5
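# For each run, sweep over epsilon and train every variant for n episodes.
# Each learner returns (Q, average_reward, max_reward, all_rewards, _);
# only the average and per-episode rewards are kept here.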
for r in range(number_of_runs):
    for epsilon in epsilon_values:
        print(epsilon)
        Q, average_reward, max_reward, all_rewards, _ = double_sarsa(gw, n, epsilon=epsilon)
        average_reward_double_sarsa.append(average_reward)
        all_rewards_per_episode_double_sarsa.append(all_rewards)
        Q, average_reward, max_reward, all_rewards, _ = expected_sarsa(gw, n, epsilon=epsilon)
        average_reward_expected_sarsa.append(average_reward)
        all_rewards_per_episode_expected_sarsa.append(all_rewards)
        Q, average_reward, max_reward, all_rewards, _ = double_expected_sarsa(gw, n, epsilon=epsilon)
        average_reward_double_expected_sarsa.append(average_reward)
        all_rewards_per_episode_double_expected_sarsa.append(all_rewards)
        Q, average_reward, max_reward, all_rewards, _ = sarsa(gw, n, epsilon=epsilon)
        average_reward_sarsa.append(average_reward)
        all_rewards_per_episode_sarsa.append(all_rewards)
# TODO: plot all sarsa, expected_sarsa, double_Sarsa
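# Results were appended in run-major order (number_of_runs blocks of
# len(epsilon_values) values each), so splitting into number_of_runs chunks
# and averaging along axis 0 gives the mean reward per epsilon across runs.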
average_reward_double_sarsa = np.mean(np.split(np.array(average_reward_double_sarsa), number_of_runs), axis=0)
average_reward_expected_sarsa = np.mean(np.split(np.array(average_reward_expected_sarsa), number_of_runs), axis=0)
average_reward_double_expected_sarsa = np.mean(np.split(np.array(average_reward_double_expected_sarsa), number_of_runs), axis=0)
average_reward_sarsa = np.mean(np.split(np.array(average_reward_sarsa), number_of_runs), axis=0)
plt.plot(average_reward_double_sarsa, label="Double Sarsa")
plt.plot(average_reward_expected_sarsa, label="Expected Sarsa")
plt.plot(average_reward_double_expected_sarsa, label="Double Expected Sarsa")
plt.plot(average_reward_sarsa, label="Sarsa")
plt.ylabel('Average reward')
plt.xlabel('epsilon')
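# The x-axis positions are just the indices 0..len(epsilon_values)-1;
# relabel them with the actual epsilon values.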
plt.xticks([x for x in range(len(average_reward_sarsa))], epsilon_values)
ax = plt.gca()
# ax.set_xscale('symlog')
ax.legend(loc='upper right', shadow=True)
plt.show()
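# The commented-out blocks below plot per-episode returns for each epsilon
# value, one figure per algorithm; uncomment them to inspect learning curves.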
#
# for x, e in zip(all_rewards_per_episode_double_sarsa, epsilon_values):
# # import pdb; pdb.set_trace()
# plt.plot(x, label="e=%s"%e)
#
# # break
#
# plt.ylabel('Returns per episode')
# plt.xlabel('episode')
#
# ax = plt.gca()
# # ax.set_xscale('symlog')
# ax.legend(loc='lower right', shadow=True)
# plt.show()
#
#
#
# for x, e in zip(all_rewards_per_episode_expected_sarsa, epsilon_values):
# # import pdb; pdb.set_trace()
# plt.plot(x, label="e=%s"%e)
#
# # break
#
# plt.ylabel('Returns per episode')
# plt.xlabel('episode')
#
# ax = plt.gca()
# # ax.set_xscale('symlog')
# ax.legend(loc='lower right', shadow=True)
# plt.show()
#
# for x, e in zip(all_rewards_per_episode_double_expected_sarsa, epsilon_values):
# # import pdb; pdb.set_trace()
# plt.plot(x, label="e=%s"%e)
#
# # break
#
# plt.ylabel('Returns per episode')
# plt.xlabel('episode')
#
# ax = plt.gca()
# # ax.set_xscale('symlog')
# ax.legend(loc='lower right', shadow=True)
# plt.show()