Skip to content

Commit

Permalink
Update gym to gymnasium in env_portfolio.py (#1075)
Browse files Browse the repository at this point in the history
The Gym team has transitioned all future development to Gymnasium. Gym will not receive future updates.
  • Loading branch information
mmmarchetti authored Aug 27, 2023
1 parent 33a8dde commit 55a9942
Showing 1 changed file with 8 additions and 7 deletions.
15 changes: 8 additions & 7 deletions finrl/meta/env_portfolio_allocation/env_portfolio.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,12 @@
from __future__ import annotations

import gym
import gymnasium as gym
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from gym import spaces
from gym.utils import seeding
from gymnasium import spaces
from gymnasium.utils import seeding
from stable_baselines3.common.vec_env import DummyVecEnv

matplotlib.use("Agg")
Expand Down Expand Up @@ -153,14 +153,15 @@ def step(self, actions):
print("Sharpe: ", sharpe)
print("=================================")

return self.state, self.reward, self.terminal, {}
return self.state, self.reward, self.terminal, False, {}

else:
# print("Model actions: ",actions)
# actions are the portfolio weight
# normalize to sum of 1
# if (np.array(actions) - np.array(actions).min()).sum() != 0:
# norm_actions = (np.array(actions) - np.array(actions).min()) / (np.array(actions) - np.array(actions).min()).sum()
# norm_actions = (np.array(actions) - np.array(actions).min()) /
# (np.array(actions) - np.array(actions).min()).sum()
# else:
# norm_actions = actions
weights = self.softmax_normalization(actions)
Expand Down Expand Up @@ -197,7 +198,7 @@ def step(self, actions):
# print("Step reward: ", self.reward)
# self.reward = self.reward*self.reward_scaling

return self.state, self.reward, self.terminal, {}
return self.state, self.reward, self.terminal, False, {}

def reset(
self,
Expand All @@ -222,7 +223,7 @@ def reset(
self.portfolio_return_memory = [0]
self.actions_memory = [[1 / self.stock_dim] * self.stock_dim]
self.date_memory = [self.data.date.unique()[0]]
return self.state
return self.state, {}

def render(self, mode="human"):
    """Return the current observation instead of drawing anything.

    Parameters
    ----------
    mode : str
        Kept for Gym/Gymnasium API compatibility; the value is ignored —
        no graphical rendering is performed regardless of mode.

    Returns
    -------
    The environment's current state (same object as ``self.state``).
    """
    return self.state
Expand Down

0 comments on commit 55a9942

Please sign in to comment.