examples/ppo/seed_rl_atari_preprocessing.py (3 additions, 3 deletions)
@@ -31,8 +31,8 @@
 """

 import cv2
-from gymnasium.spaces.box import Box
 import gymnasium as gym
+from gymnasium.spaces.box import Box
 import numpy as np
@@ -117,7 +117,7 @@ def action_space(self):

   @property
   def reward_range(self):
-    return self.environment.reward_range
+    return self.environment.reward_range  # type: ignore

   @property
   def metadata(self):
@@ -189,7 +189,7 @@ def step(self, action):
       # We bypass the Gym observation altogether and directly fetch the
       # grayscale image from the ALE. This is a little faster.
       _, reward, game_over, _, info = self.environment.step(action)
-      accumulated_reward += reward
+      accumulated_reward += float(reward)

       if self.terminal_on_life_loss:
         new_lives = self.environment.unwrapped.ale.lives()  # pytype:disable=attribute-error
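The `float(reward)` cast matters because Gymnasium types the per-step reward as `SupportsFloat`, and in practice it is often a NumPy scalar; without the cast, the accumulator silently becomes a NumPy scalar too, which is what the type checker flags. The snippet below is a minimal standalone sketch (not part of the PR, with hypothetical reward values) illustrating that behavior:

```python
import numpy as np

# Hypothetical per-step rewards, as an env might return NumPy scalars.
rewards = [np.float64(1.0), np.int64(0), 0.5]

accumulated_reward = 0.0
for reward in rewards:
    # Casting keeps the accumulator a plain Python float instead of
    # letting NumPy promote it to np.float64 on the first addition.
    accumulated_reward += float(reward)

print(type(accumulated_reward), accumulated_reward)  # <class 'float'> 1.5
```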