Skip to content

Commit 55a9942

Browse files
authored
Update gym to gymnasium in env_portfolio.py (#1075)
The Gym team has transitioned all future development to Gymnasium. Gym will not receive future updates.
1 parent 33a8dde commit 55a9942

File tree

1 file changed

+8
-7
lines changed

1 file changed

+8
-7
lines changed

finrl/meta/env_portfolio_allocation/env_portfolio.py

Lines changed: 8 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,12 @@
11
from __future__ import annotations
22

3-
import gym
3+
import gymnasium as gym
44
import matplotlib
55
import matplotlib.pyplot as plt
66
import numpy as np
77
import pandas as pd
8-
from gym import spaces
9-
from gym.utils import seeding
8+
from gymnasium import spaces
9+
from gymnasium.utils import seeding
1010
from stable_baselines3.common.vec_env import DummyVecEnv
1111

1212
matplotlib.use("Agg")
@@ -153,14 +153,15 @@ def step(self, actions):
153153
print("Sharpe: ", sharpe)
154154
print("=================================")
155155

156-
return self.state, self.reward, self.terminal, {}
156+
return self.state, self.reward, self.terminal, False, {}
157157

158158
else:
159159
# print("Model actions: ",actions)
160160
# actions are the portfolio weight
161161
# normalize to sum of 1
162162
# if (np.array(actions) - np.array(actions).min()).sum() != 0:
163-
# norm_actions = (np.array(actions) - np.array(actions).min()) / (np.array(actions) - np.array(actions).min()).sum()
163+
# norm_actions = (np.array(actions) - np.array(actions).min()) /
164+
# (np.array(actions) - np.array(actions).min()).sum()
164165
# else:
165166
# norm_actions = actions
166167
weights = self.softmax_normalization(actions)
@@ -197,7 +198,7 @@ def step(self, actions):
197198
# print("Step reward: ", self.reward)
198199
# self.reward = self.reward*self.reward_scaling
199200

200-
return self.state, self.reward, self.terminal, {}
201+
return self.state, self.reward, self.terminal, False, {}
201202

202203
def reset(
203204
self,
@@ -222,7 +223,7 @@ def reset(
222223
self.portfolio_return_memory = [0]
223224
self.actions_memory = [[1 / self.stock_dim] * self.stock_dim]
224225
self.date_memory = [self.data.date.unique()[0]]
225-
return self.state
226+
return self.state, {}
226227

227228
def render(self, mode="human"):
228229
return self.state

0 commit comments

Comments
 (0)