Update to follow the new observation format
(following the vision input format of the OpenAI Atari environments)
ugo-nama-kun committed Feb 8, 2017
1 parent 6073792 commit 01a86c0
Showing 1 changed file with 5 additions and 4 deletions.
9 changes: 5 additions & 4 deletions sample_agent.py
@@ -5,28 +5,29 @@ class Agent(object):
     def __init__(self, dim_action):
         self.dim_action = dim_action
 
-    def act(self, ob, reward, done, vision):
+    def act(self, ob, reward, done, vision_on):
         #print("ACT!")
 
         # Get an Observation from the environment.
         # Each observation vectors are numpy array.
         # focus, opponents, track sensors are scaled into [0, 1]. When the agent
         # is out of the road, sensor variables return -1/200.
         # rpm, wheelSpinVel are raw values and then needed to be preprocessed.
-        # vision is given as a tensor with size of (3, 64, 64) <-- rgb
+        # vision is given as a tensor with size of (64*64, 3) = (4096, 3) <-- rgb
         # and values are in [0, 255]
-        if vision is False:
+        if vision_on is False:
             focus, speedX, speedY, speedZ, opponents, rpm, track, wheelSpinVel = ob
         else:
             focus, speedX, speedY, speedZ, opponents, rpm, track, wheelSpinVel, vision = ob
 
         """ The code below is for checking the vision input. This is very heavy for real-time Control
             So you may need to remove.
         """
         print(vision.shape)
         """
         img = np.ndarray((64,64,3))
         for i in range(3):
-            img[:, :, i] = 255 - vision[i]
+            img[:, :, i] = 255 - vision[:, i].reshape((64, 64))
         plt.imshow(img, origin='lower')
         plt.draw()
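
For readers adapting their own agents, here is a minimal sketch of how the new observation format can be unpacked into an ordinary 64x64 RGB image with NumPy. The (4096, 3) shape and the [0, 255] value range come from the comments in the diff above; the helper name vision_to_image is only for illustration and is not part of the repository.

import numpy as np

def vision_to_image(vision):
    # vision is assumed to be the (64*64, 3) = (4096, 3) array described above:
    # one row per pixel, one column per R/G/B channel, values in [0, 255].
    img = np.zeros((64, 64, 3), dtype=np.uint8)
    for i in range(3):
        # Same per-channel reshape as the updated line in sample_agent.py.
        img[:, :, i] = vision[:, i].reshape((64, 64))
    return img  # equivalent to vision.reshape((64, 64, 3)) for row-major pixel order

Note that the commented-out check in sample_agent.py additionally inverts the values (255 - ...) and plots with origin='lower' before displaying; the sketch above only reshapes.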
