FMU Simulation Failed: Single-Zone Commercial Hydronic #144

@IamAniket12

Description

I'm running the run_vectorized.py script from the examples folder locally, pointed at a different test case (the single-zone commercial hydronic one). The script executes successfully for a number of iterations, but then throws a 'simulation failed' error. The same script runs without errors on the bestest_hydronic_pump test case. The code and server log are provided below.

import os

import torch
import wandb
import yaml
from stable_baselines3 import PPO
from stable_baselines3.common.callbacks import EvalCallback
from stable_baselines3.common.logger import configure
from stable_baselines3.common.vec_env import SubprocVecEnv, VecMonitor
from wandb.integration.sb3 import WandbCallback

from boptestGymEnv import NormalizedObservationWrapper

# BoptestGymEnvCustomReward and SaveAndTestCallback are project-specific
# classes defined elsewhere in the example code (not shown here).


def generate_urls_from_yml(boptest_root_dir):
    """Method that returns as many URLs for BOPTEST-Gym environments
    as there are services specified in the BOPTEST `docker-compose.yml` file.
    It assumes that `generateDockerComposeYml.py` has been called first.

    Parameters
    ----------
    boptest_root_dir: str
        String with directory to BOPTEST where the `docker-compose.yml`
        file should be located.

    Returns
    -------
    urls: list
        List of URLs where BOPTEST test cases will be allocated.
    """
    docker_compose_loc = os.path.join(boptest_root_dir, "docker-compose.yml")

    # Read the docker-compose.yml file
    with open(docker_compose_loc, "r") as stream:
        try:
            docker_compose_data = yaml.safe_load(stream)
            services = docker_compose_data.get("services", {})

            # Extract the port and URL of the service
            urls = []
            for service, config in services.items():
                ports = config.get("ports", [])
                for port in ports:
                    # Extract the host port; this assumes an 'IP:host:container'
                    # mapping, as generated by generateDockerComposeYml.py
                    host_port = port.split(":")[1]
                    urls.append(f"http://127.0.0.1:{host_port}")

            print(urls)  # Print URLs

        except yaml.YAMLError as exc:
            print(exc)

    return urls
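
For reference, here is a minimal, self-contained check of how the URLs are derived. The services section below is hypothetical; the 'IP:host:container' port layout is my assumption about what generateDockerComposeYml.py emits:

import yaml

# Hypothetical docker-compose services section (assumed port layout)
sample = """
services:
  boptest5000:
    ports:
      - "127.0.0.1:5000:5000"
  boptest5001:
    ports:
      - "127.0.0.1:5001:5001"
"""

# Same extraction logic as above, condensed into a comprehension
services = yaml.safe_load(sample).get("services", {})
urls = [f"http://127.0.0.1:{p.split(':')[1]}"
        for cfg in services.values() for p in cfg.get("ports", [])]
print(urls)  # ['http://127.0.0.1:5000', 'http://127.0.0.1:5001']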

def make_env(url):
    """Function that instantiates the environment.
    Parameters
    ----------
    url: string
        REST API URL for communication with this environment.
    """
    def _init():
        env = BoptestGymEnvCustomReward(
            url=url,
            actions=["oveValCoi_u"],
            observations={
                "time": (0, 31536000),
                "reaTZon_y": (280.0, 310.0),
                "reaCO2Zon_y": (200.0, 2000.0),
                "PriceElectricPowerHighlyDynamic": (-0.4, 0.4),
                "LowerSetp[1]": (280.0, 310.0),
                "UpperSetp[1]": (280.0, 310.0),
                "UpperCO2[1]": (0, 10000),
            },
            scenario={"electricity_price": "highly_dynamic"},
            predictive_period=24 * 3600,
            random_start_time=True,
            excluding_periods=[
                (173*24*3600, 266*24*3600)
            ],
            max_episode_length=14*24*3600,
            step_period=3600,
            warmup_period=7*24*3600
        )
        env = NormalizedObservationWrapper(env)  # Add observation normalization if needed
        # env = DiscretizedActionWrapper(env, n_bins_act=15)  # Add action discretization if needed
        return env

    return _init
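
To rule out problems in a single environment before vectorizing, one instance can be smoke-tested on its own. This is a minimal sketch using Stable-Baselines3's environment checker; it assumes one BOPTEST container is reachable at the URL below:

from stable_baselines3.common.env_checker import check_env

# Sketch: sanity-check one instance before wrapping it in SubprocVecEnv
env = make_env("http://127.0.0.1:5000")()
check_env(env, warn=True)  # resets and steps the env once, validating the spaces
env.close()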

def train_PPO_vectorized(
    venv,
    log_dir=os.path.join("results", "PPO", "V1"),
    tensorboard_log=os.path.join("results", "PPO", "V1"),
):
    """Method to train a PPO agent using a vectorized environment.

    Parameters
    ----------
    venv: stable_baselines3.common.vec_env.SubprocVecEnv
        Vectorized environment to be learned.
    """
    # Create the logging directory if it does not exist; monitoring
    # data and the agent model are stored here
    os.makedirs(log_dir, exist_ok=True)
    env_config = {
        "url": "url",
        "actions": ["oveValCoi_u"],
        "observations": {
            "time": [0, 31536000],
            "reaTZon_y": [280.0, 310.0],
            "reaCO2Zon_y": [200.0, 2000.0],
            "PriceElectricPowerHighlyDynamic": [-0.4, 0.4],
            "LowerSetp[1]": [280.0, 310.0],
            "UpperSetp[1]": [280.0, 310.0],
            "UpperCO2[1]": (0, 10000),
        },
        "scenario": {"electricity_price": "highly_dynamic"},
        "predictive_period": 24 * 3600,
        "random_start_time": "true",
        "excluding_periods": [[173*24*3600, 266*24*3600]],
        "max_episode_length": 14*24*3600,
        "step_period": 3600,
        "action_space": "continuous",
        "warmup_period": 7*24*3600
    }

    # Wrap the environment with a monitor that logs episode data to monitor.csv
    venv = VecMonitor(venv=venv, filename=os.path.join(log_dir, "monitor.csv"))
    run = wandb.init(
        project="PPO",  # Replace with your project name
        sync_tensorboard=True,  # Auto-sync with TensorBoard
        config=env_config,
        name="V1",
        id="676",
        resume="allow",
    )
    print(run.id)

    # Create the evaluation callback: evaluate with one episode
    # every eval_freq training steps. Kept short for testing.
    eval_freq = 2000
    eval_callback = EvalCallback(
        venv,
        best_model_save_path=log_dir,
        log_path=log_dir,
        eval_freq=int(eval_freq / venv.num_envs),
        n_eval_episodes=1,
        deterministic=True,
    )
    wandb_callback = WandbCallback(
        model_save_path=log_dir,
        model_save_freq=2000,
        verbose=2,
    )
    callback = SaveAndTestCallback(
        venv, check_freq=1500, save_freq=1500, log_dir=log_dir, test=False
    )

    # Use a CUDA device if available; fall back to CPU otherwise
    device = "cuda" if torch.cuda.is_available() else "cpu"
    print(device)

    # Instantiate an RL agent with PPO
    model = PPO(
        "MlpPolicy",
        venv,
        learning_rate=3e-4,
        n_steps=336,
        batch_size=64,
        n_epochs=10,
        gamma=0.99,
        gae_lambda=0.95,
        clip_range=0.2,
        ent_coef=0.01,
        vf_coef=0.5,
        max_grad_norm=0.5,
        tensorboard_log=tensorboard_log,
        verbose=1,
        device=device,  # Use the appropriate device
    )

    # Set up the logger so training metrics go to stdout, CSV, and TensorBoard
    new_logger = configure(log_dir, ["stdout", "csv", "tensorboard"])
    model.set_logger(new_logger)

    # Main training loop
    model.learn(
        total_timesteps=10000000, callback=[eval_callback, wandb_callback, callback]
    )

if __name__ == "__main__":
    boptest_root = "/home/aniket/Desktop/Codes/HVAC_Training/project1-boptest"

    # Root directory of the local BOPTEST installation (hard-coded here;
    # this could instead be read from the command line)
    boptest_root_dir = boptest_root

    # Use URLs obtained from docker-compose.yml
    urls = generate_urls_from_yml(boptest_root_dir=boptest_root_dir)

    # Create BOPTEST-Gym environment replicas
    envs = [make_env(url) for url in urls]

    # Create a vectorized environment using SubprocVecEnv
    venv = SubprocVecEnv(envs)

    # Train the agent on the vectorized environment
    train_PPO_vectorized(venv)


Server log:

boptest5000_1  |     res = self.fmu.simulate(start_time=start_time,
boptest5000_1  |   File "src/pyfmi/fmi.pyx", line 7573, in pyfmi.fmi.FMUModelCS2.simulate
boptest5000_1  |   File "src/pyfmi/fmi.pyx", line 378, in pyfmi.fmi.ModelBase._exec_simulate_algorithm
boptest5000_1  |   File "src/pyfmi/fmi.pyx", line 374, in pyfmi.fmi.ModelBase._exec_simulate_algorithm
boptest5000_1  |   File "/home/user/miniconda/envs/pyfmi3/lib/python3.10/site-packages/pyfmi/fmi_algorithm_drivers.py", line 1065, in solve
boptest5000_1  |     result_handler.integration_point()
boptest5000_1  |   File "/home/user/miniconda/envs/pyfmi3/lib/python3.10/site-packages/pyfmi/common/io.py", line 2650, in integration_point
boptest5000_1  |     self.dump_data_internal.save_point()
boptest5000_1  |   File "src/pyfmi/fmi_util.pyx", line 1087, in pyfmi.fmi_util.DumpData.save_point
boptest5000_1  |   File "src/pyfmi/fmi.pyx", line 4203, in pyfmi.fmi.FMUModelBase2.get_real
boptest5000_1  |   File "src/pyfmi/fmi.pyx", line 4235, in pyfmi.fmi.FMUModelBase2.get_real
boptest5000_1  | pyfmi.fmi.FMUException: Failed to get the Real values.
boptest5000_1  | .
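
The FMUException ('Failed to get the Real values') is raised by PyFMI while retrieving outputs from the co-simulation FMU, which usually means the FMU's internal solver has failed at that point in the simulation. The failure therefore happens server-side, in the test case container during an /advance call, rather than in the gym wrapper. To reproduce it without the RL stack, the test case can be advanced directly over the BOPTEST REST API. This is a sketch assuming a local BOPTEST v0.x deployment (one test case per container, no testid in the path); the start time and the constant valve signal are arbitrary choices:

import requests

# Sketch: drive the failing test case directly to rule out the gym/SB3 layer.
# Assumes a local BOPTEST v0.x container at this URL; the start time below
# is an arbitrary point outside the excluded summer period.
url = "http://127.0.0.1:5000"
requests.put(f"{url}/step", data={"step": 3600})
requests.put(f"{url}/initialize",
             data={"start_time": 30 * 24 * 3600, "warmup_period": 7 * 24 * 3600})
for _ in range(14 * 24):  # one max-length episode of hourly steps
    res = requests.post(f"{url}/advance",
                        data={"oveValCoi_u": 1.0, "oveValCoi_activate": 1})
    if res.status_code != 200:  # the FMU failure should surface here
        print(res.status_code, res.text)
        break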
