diff --git a/hyperparams/a2c.yml b/hyperparams/a2c.yml
index ba9416e85..04af6c6af 100644
--- a/hyperparams/a2c.yml
+++ b/hyperparams/a2c.yml
@@ -19,6 +19,12 @@ CartPole-v1:
   policy: 'MlpPolicy'
   ent_coef: 0.0
 
+seals/CartPole-v0:
+  n_envs: 8
+  n_timesteps: !!float 5e5
+  policy: 'MlpPolicy'
+  ent_coef: 0.0
+
 LunarLander-v2:
   n_envs: 8
   n_timesteps: !!float 2e5
@@ -35,6 +41,13 @@ MountainCar-v0:
   policy: 'MlpPolicy'
   ent_coef: .0
 
+seals/MountainCar-v0:
+  normalize: true
+  n_envs: 16
+  n_timesteps: !!float 1e6
+  policy: 'MlpPolicy'
+  ent_coef: .0
+
 Acrobot-v1:
   normalize: true
   n_envs: 16
@@ -170,19 +183,39 @@ HalfCheetah-v3: &mujoco-defaults
   n_timesteps: !!float 1e6
   policy: 'MlpPolicy'
 
+seals/HalfCheetah-v0:
+  <<: *mujoco-defaults
+
 Ant-v3:
   <<: *mujoco-defaults
 
+seals/Ant-v0:
+  <<: *mujoco-defaults
+
 Hopper-v3:
   <<: *mujoco-defaults
 
+seals/Hopper-v0:
+  <<: *mujoco-defaults
+
 Walker2d-v3:
   <<: *mujoco-defaults
 
+seals/Walker2d-v0:
+  <<: *mujoco-defaults
+
 Humanoid-v3:
   <<: *mujoco-defaults
   n_timesteps: !!float 2e6
 
+seals/Humanoid-v0:
+  <<: *mujoco-defaults
+  n_timesteps: !!float 2e6
+
 Swimmer-v3:
   <<: *mujoco-defaults
   gamma: 0.9999
+
+seals/Swimmer-v0:
+  <<: *mujoco-defaults
+  gamma: 0.9999
diff --git a/hyperparams/ars.yml b/hyperparams/ars.yml
index e58d4fa3c..eb89cc211 100644
--- a/hyperparams/ars.yml
+++ b/hyperparams/ars.yml
@@ -5,6 +5,12 @@ CartPole-v1:
   policy: 'LinearPolicy'
   n_delta: 2
 
+seals/CartPole-v0:
+  n_envs: 1
+  n_timesteps: !!float 5e4
+  policy: 'LinearPolicy'
+  n_delta: 2
+
 # Tuned
 Pendulum-v1: &pendulum-params
   n_envs: 1
@@ -41,6 +47,11 @@ MountainCar-v0:
   n_delta: 8
   n_timesteps: !!float 5e5
 
+seals/MountainCar-v0:
+  <<: *pendulum-params
+  n_delta: 8
+  n_timesteps: !!float 5e5
+
 # Tuned
 MountainCarContinuous-v0:
   <<: *pendulum-params
@@ -119,6 +130,17 @@ Swimmer-v3:
   alive_bonus_offset: 0
   # normalize: "dict(norm_obs=True, norm_reward=False)"
 
+seals/Swimmer-v0:
+  n_envs: 1
+  policy: 'LinearPolicy'
+  n_timesteps: !!float 2e6
+  learning_rate: !!float 0.02
+  delta_std: !!float 0.01
+  n_delta: 1
+  n_top: 1
+  alive_bonus_offset: 0
+  # normalize: "dict(norm_obs=True, norm_reward=False)"
+
 Hopper-v3:
   n_envs: 1
   policy: 'LinearPolicy'
@@ -130,6 +152,17 @@ Hopper-v3:
   alive_bonus_offset: -1
   normalize: "dict(norm_obs=True, norm_reward=False)"
 
+seals/Hopper-v0:
+  n_envs: 1
+  policy: 'LinearPolicy'
+  n_timesteps: !!float 7e6
+  learning_rate: !!float 0.01
+  delta_std: !!float 0.025
+  n_delta: 8
+  n_top: 4
+  alive_bonus_offset: -1
+  normalize: "dict(norm_obs=True, norm_reward=False)"
+
 HalfCheetah-v3:
   n_envs: 1
   policy: 'LinearPolicy'
@@ -141,6 +174,17 @@ HalfCheetah-v3:
   alive_bonus_offset: 0
   normalize: "dict(norm_obs=True, norm_reward=False)"
 
+seals/HalfCheetah-v0:
+  n_envs: 1
+  policy: 'LinearPolicy'
+  n_timesteps: !!float 1.25e7
+  learning_rate: !!float 0.02
+  delta_std: !!float 0.03
+  n_delta: 32
+  n_top: 4
+  alive_bonus_offset: 0
+  normalize: "dict(norm_obs=True, norm_reward=False)"
+
 Walker2d-v3:
   n_envs: 1
   policy: 'LinearPolicy'
@@ -152,6 +196,17 @@ Walker2d-v3:
   alive_bonus_offset: -1
   normalize: "dict(norm_obs=True, norm_reward=False)"
 
+seals/Walker2d-v0:
+  n_envs: 1
+  policy: 'LinearPolicy'
+  n_timesteps: !!float 7.5e7
+  learning_rate: !!float 0.03
+  delta_std: !!float 0.025
+  n_delta: 40
+  n_top: 30
+  alive_bonus_offset: -1
+  normalize: "dict(norm_obs=True, norm_reward=False)"
+
 Ant-v3:
   n_envs: 1
   policy: 'LinearPolicy'
@@ -163,6 +218,17 @@ Ant-v3:
   alive_bonus_offset: -1
   normalize: "dict(norm_obs=True, norm_reward=False)"
 
+seals/Ant-v0:
+  n_envs: 1
+  policy: 'LinearPolicy'
+  n_timesteps: !!float 7.5e7
+  learning_rate: !!float 0.015
+  delta_std: !!float 0.025
+  n_delta: 60
+  n_top: 20
+  alive_bonus_offset: -1
+  normalize: "dict(norm_obs=True, norm_reward=False)"
+
 Humanoid-v3:
   n_envs: 1
   policy: 'LinearPolicy'
@@ -175,6 +241,17 @@ Humanoid-v3:
   alive_bonus_offset: -5
   normalize: "dict(norm_obs=True, norm_reward=False)"
 
+seals/Humanoid-v0:
+  n_envs: 1
+  policy: 'LinearPolicy'
+  n_timesteps: !!float 2.5e8
+  learning_rate: 0.02
+  delta_std: 0.0075
+  n_delta: 256
+  n_top: 256
+  alive_bonus_offset: -5
+  normalize: "dict(norm_obs=True, norm_reward=False)"
+
 # Almost tuned
 BipedalWalker-v3:
   n_envs: 1
diff --git a/hyperparams/ddpg.yml b/hyperparams/ddpg.yml
index 14a53cfca..996cee226 100644
--- a/hyperparams/ddpg.yml
+++ b/hyperparams/ddpg.yml
@@ -131,21 +131,43 @@ HalfCheetah-v3: &mujoco-defaults
   noise_type: 'normal'
   noise_std: 0.1
 
+seals/HalfCheetah-v0:
+  <<: *mujoco-defaults
+
 Ant-v3:
   <<: *mujoco-defaults
 
+seals/Ant-v0:
+  <<: *mujoco-defaults
+
 Hopper-v3:
   <<: *mujoco-defaults
 
+seals/Hopper-v0:
+  <<: *mujoco-defaults
+
 Walker2d-v3:
   <<: *mujoco-defaults
 
+seals/Walker2d-v0:
+  <<: *mujoco-defaults
+
 Humanoid-v3:
   <<: *mujoco-defaults
   n_timesteps: !!float 2e6
 
+seals/Humanoid-v0:
+  <<: *mujoco-defaults
+  n_timesteps: !!float 2e6
+
 Swimmer-v3:
   <<: *mujoco-defaults
   gamma: 0.9999
   train_freq: 1
   gradient_steps: 1
+
+seals/Swimmer-v0:
+  <<: *mujoco-defaults
+  gamma: 0.9999
+  train_freq: 1
+  gradient_steps: 1
diff --git a/hyperparams/dqn.yml b/hyperparams/dqn.yml
index b91d6c474..5f63ac67b 100644
--- a/hyperparams/dqn.yml
+++ b/hyperparams/dqn.yml
@@ -33,6 +33,21 @@ CartPole-v1:
   exploration_final_eps: 0.04
   policy_kwargs: "dict(net_arch=[256, 256])"
 
+seals/CartPole-v0:
+  n_timesteps: !!float 5e4
+  policy: 'MlpPolicy'
+  learning_rate: !!float 2.3e-3
+  batch_size: 64
+  buffer_size: 100000
+  learning_starts: 1000
+  gamma: 0.99
+  target_update_interval: 10
+  train_freq: 256
+  gradient_steps: 128
+  exploration_fraction: 0.16
+  exploration_final_eps: 0.04
+  policy_kwargs: "dict(net_arch=[256, 256])"
+
 # Tuned
 MountainCar-v0:
   n_timesteps: !!float 1.2e5
@@ -49,6 +64,21 @@ MountainCar-v0:
   exploration_final_eps: 0.07
   policy_kwargs: "dict(net_arch=[256, 256])"
 
+seals/MountainCar-v0:
+  n_timesteps: !!float 1.2e5
+  policy: 'MlpPolicy'
+  learning_rate: !!float 4e-3
+  batch_size: 128
+  buffer_size: 10000
+  learning_starts: 1000
+  gamma: 0.98
+  target_update_interval: 600
+  train_freq: 16
+  gradient_steps: 8
+  exploration_fraction: 0.2
+  exploration_final_eps: 0.07
+  policy_kwargs: "dict(net_arch=[256, 256])"
+
 # Tuned
 LunarLander-v2:
   n_timesteps: !!float 1e5
diff --git a/hyperparams/ppo.yml b/hyperparams/ppo.yml
index 10909fbe8..1d0d3b5d2 100644
--- a/hyperparams/ppo.yml
+++ b/hyperparams/ppo.yml
@@ -42,6 +42,23 @@ CartPole-v1:
   learning_rate: lin_0.001
   clip_range: lin_0.2
 
+# Tuned
+seals/CartPole-v0:
+  n_envs: 8
+  n_timesteps: !!float 1e5
+  policy: 'MlpPolicy'
+  batch_size: 256
+  clip_range: 0.4
+  ent_coef: 0.008508727919228772
+  gae_lambda: 0.9
+  gamma: 0.9999
+  learning_rate: 0.0012403278189645594
+  max_grad_norm: 0.8
+  n_epochs: 10
+  n_steps: 512
+  policy_kwargs: dict(activation_fn=nn.ReLU, net_arch=[dict(pi=[64, 64], vf=[64, 64])])
+  vf_coef: 0.489343896591493
+
 MountainCar-v0:
   normalize: true
   n_envs: 16
@@ -53,6 +70,24 @@ MountainCar-v0:
   n_epochs: 4
   ent_coef: 0.0
 
+# Tuned
+seals/MountainCar-v0:
+  normalize: true
+  n_envs: 16
+  n_timesteps: !!float 1e6
+  policy: 'MlpPolicy'
+  batch_size: 512
+  clip_range: 0.2
+  ent_coef: 6.4940755116195606e-06
+  gae_lambda: 0.98
+  gamma: 0.99
+  learning_rate: 0.0004476103728105138
+  max_grad_norm: 1
+  n_epochs: 20
+  n_steps: 256
+  policy_kwargs: dict(activation_fn=nn.Tanh, net_arch=[dict(pi=[64, 64], vf=[64, 64])])
+  vf_coef: 0.25988158989488963
+
 # Tuned
 MountainCarContinuous-v0:
   normalize: true
@@ -360,6 +395,21 @@ Ant-v3: &mujoco-defaults
   n_timesteps: !!float 1e6
   policy: 'MlpPolicy'
 
+# Tuned
+seals/Ant-v0:
+  <<: *mujoco-defaults
+  batch_size: 16
+  clip_range: 0.3
+  ent_coef: 3.1441389214159857e-06
+  gae_lambda: 0.8
+  gamma: 0.995
+  learning_rate: 0.00017959211641976886
+  max_grad_norm: 0.9
+  n_epochs: 10
+  n_steps: 2048
+  policy_kwargs: dict(activation_fn=nn.Tanh, net_arch=[dict(pi=[64, 64], vf=[64, 64])])
+  vf_coef: 0.4351450387648799
+
 # Hopper-v3:
 #   <<: *mujoco-defaults
 #
@@ -374,6 +424,21 @@ Swimmer-v3:
   <<: *mujoco-defaults
   gamma: 0.9999
 
+# Tuned
+seals/Swimmer-v0:
+  <<: *mujoco-defaults
+  batch_size: 8
+  clip_range: 0.1
+  ent_coef: 5.167107294612664e-08
+  gae_lambda: 0.95
+  gamma: 0.999
+  learning_rate: 0.0001214437022727675
+  max_grad_norm: 2
+  n_epochs: 20
+  n_steps: 2048
+  policy_kwargs: dict(activation_fn=nn.Tanh, net_arch=[dict(pi=[64, 64], vf=[64, 64])])
+  vf_coef: 0.6162112311062333
+
 # Tuned
 # 10 mujoco envs
 
@@ -399,6 +464,24 @@ HalfCheetah-v3:
                        net_arch=[dict(pi=[256, 256], vf=[256, 256])]
                       )"
 
+# Tuned
+seals/HalfCheetah-v0:
+  normalize: true
+  n_envs: 1
+  policy: 'MlpPolicy'
+  n_timesteps: !!float 1e6
+  batch_size: 64
+  clip_range: 0.1
+  ent_coef: 3.794797423594763e-06
+  gae_lambda: 0.95
+  gamma: 0.95
+  learning_rate: 0.0003286871805949382
+  max_grad_norm: 0.8
+  n_epochs: 5
+  n_steps: 512
+  policy_kwargs: dict(activation_fn=nn.Tanh, net_arch=[dict(pi=[64, 64], vf=[64, 64])])
+  vf_coef: 0.11483689492120866
+
 # Ant-v3:
 #   normalize: true
 #   n_envs: 1
@@ -437,6 +520,24 @@ Hopper-v3:
                        net_arch=[dict(pi=[256, 256], vf=[256, 256])]
                       )"
 
+# Tuned
+seals/Hopper-v0:
+  normalize: true
+  n_envs: 1
+  policy: 'MlpPolicy'
+  n_timesteps: !!float 1e6
+  batch_size: 512
+  clip_range: 0.1
+  ent_coef: 0.0010159833764878474
+  gae_lambda: 0.98
+  gamma: 0.995
+  learning_rate: 0.0003904770450788824
+  max_grad_norm: 0.9
+  n_epochs: 20
+  n_steps: 2048
+  policy_kwargs: dict(activation_fn=nn.ReLU, net_arch=[dict(pi=[64, 64], vf=[64, 64])])
+  vf_coef: 0.20315938606555833
+
 HumanoidStandup-v3:
   normalize: true
   n_envs: 1
@@ -481,6 +582,25 @@ Humanoid-v3:
                        net_arch=[dict(pi=[256, 256], vf=[256, 256])]
                       )"
 
+# Tuned
+seals/Humanoid-v0:
+  normalize: true
+  n_envs: 1
+  policy: 'MlpPolicy'
+  n_timesteps: !!float 1e7
+  batch_size: 256
+  clip_range: 0.2
+  ent_coef: 2.0745206045994986e-05
+  gae_lambda: 0.92
+  gamma: 0.999
+  learning_rate: 2.0309225666232827e-05
+  max_grad_norm: 0.5
+  n_epochs: 20
+  n_steps: 2048
+  policy_kwargs: dict(activation_fn=nn.ReLU, net_arch=[dict(pi=[256, 256], vf=[256,
+    256])])
+  vf_coef: 0.819262464558427
+
 InvertedDoublePendulum-v3:
   normalize: true
   n_envs: 1
@@ -544,3 +664,22 @@ Walker2d-v3:
   gae_lambda: 0.95
   max_grad_norm: 1
   vf_coef: 0.871923
+
+# Tuned
+seals/Walker2d-v0:
+  normalize: true
+  n_envs: 1
+  policy: 'MlpPolicy'
+  n_timesteps: !!float 1e6
+  batch_size: 8
+  clip_range: 0.4
+  ent_coef: 0.00013057334805552262
+  gae_lambda: 0.92
+  gamma: 0.98
+  learning_rate: 3.791707778339674e-05
+  max_grad_norm: 0.6
+  n_epochs: 5
+  n_steps: 2048
+  policy_kwargs: dict(activation_fn=nn.ReLU, net_arch=[dict(pi=[256, 256], vf=[256,
+    256])])
+  vf_coef: 0.6167177795726859
diff --git a/hyperparams/qrdqn.yml b/hyperparams/qrdqn.yml
index d1da3e5fd..31275d9be 100644
--- a/hyperparams/qrdqn.yml
+++ b/hyperparams/qrdqn.yml
@@ -25,6 +25,21 @@ CartPole-v1:
   exploration_final_eps: 0.04
   policy_kwargs: "dict(net_arch=[256, 256], n_quantiles=10)"
 
+seals/CartPole-v0:
+  n_timesteps: !!float 5e4
+  policy: 'MlpPolicy'
+  learning_rate: !!float 2.3e-3
+  batch_size: 64
+  buffer_size: 100000
+  learning_starts: 1000
+  gamma: 0.99
+  target_update_interval: 10
+  train_freq: 256
+  gradient_steps: 128
+  exploration_fraction: 0.16
+  exploration_final_eps: 0.04
+  policy_kwargs: "dict(net_arch=[256, 256], n_quantiles=10)"
+
 # Tuned
 MountainCar-v0:
   n_timesteps: !!float 1.2e5
@@ -41,6 +56,21 @@ MountainCar-v0:
   exploration_final_eps: 0.07
   policy_kwargs: "dict(net_arch=[256, 256], n_quantiles=25)"
 
+seals/MountainCar-v0:
+  n_timesteps: !!float 1.2e5
+  policy: 'MlpPolicy'
+  learning_rate: !!float 4e-3
+  batch_size: 128
+  buffer_size: 10000
+  learning_starts: 1000
+  gamma: 0.98
+  target_update_interval: 600
+  train_freq: 16
+  gradient_steps: 8
+  exploration_fraction: 0.2
+  exploration_final_eps: 0.07
+  policy_kwargs: "dict(net_arch=[256, 256], n_quantiles=25)"
+
 # Tuned
 LunarLander-v2:
   n_timesteps: !!float 1e5
diff --git a/hyperparams/sac.yml b/hyperparams/sac.yml
index 9d41262e4..d49a29435 100644
--- a/hyperparams/sac.yml
+++ b/hyperparams/sac.yml
@@ -194,23 +194,96 @@ HalfCheetah-v3: &mujoco-defaults
   policy: 'MlpPolicy'
   learning_starts: 10000
 
+# Tuned
+seals/HalfCheetah-v0:
+  <<: *mujoco-defaults
+  batch_size: 2048
+  buffer_size: 100000
+  gamma: 0.95
+  learning_rate: 0.000884624878315995
+  learning_starts: 10000
+  policy_kwargs: dict(net_arch=[64, 64], log_std_init=-0.6932709443503001)
+  tau: 0.01
+  train_freq: 64
+
 Ant-v3:
   <<: *mujoco-defaults
 
+# Tuned
+seals/Ant-v0:
+  <<: *mujoco-defaults
+  batch_size: 512
+  buffer_size: 1000000
+  gamma: 0.98
+  learning_rate: 0.0018514039303149058
+  learning_starts: 1000
+  policy_kwargs: dict(net_arch=[256, 256], log_std_init=-2.2692589009754176)
+  tau: 0.05
+  train_freq: 64
+
 Hopper-v3:
   <<: *mujoco-defaults
 
+# Tuned
+seals/Hopper-v0:
+  <<: *mujoco-defaults
+  batch_size: 128
+  buffer_size: 100000
+  gamma: 0.98
+  learning_rate: 0.001709807687567946
+  learning_starts: 1000
+  policy_kwargs: dict(net_arch=[256, 256], log_std_init=-1.6829391077276037)
+  tau: 0.08
+  train_freq: 32
+
 Walker2d-v3:
   <<: *mujoco-defaults
 
+# Tuned
+seals/Walker2d-v0:
+  <<: *mujoco-defaults
+  batch_size: 128
+  buffer_size: 100000
+  gamma: 0.99
+  learning_rate: 0.0005845844772048097
+  learning_starts: 1000
+  policy_kwargs: dict(net_arch=[400, 300], log_std_init=0.1955317469998743)
+  tau: 0.02
+  train_freq: 1
+
 Humanoid-v3:
   <<: *mujoco-defaults
   n_timesteps: !!float 2e6
 
+# Tuned
+seals/Humanoid-v0:
+  <<: *mujoco-defaults
+  n_timesteps: !!float 2e6
+  batch_size: 64
+  buffer_size: 100000
+  gamma: 0.98
+  learning_rate: 4.426351861707874e-05
+  learning_starts: 20000
+  policy_kwargs: dict(net_arch=[400, 300], log_std_init=-0.1034412732183072)
+  tau: 0.08
+  train_freq: 8
+
 Swimmer-v3:
   <<: *mujoco-defaults
   gamma: 0.9999
 
+# Tuned
+seals/Swimmer-v0:
+  <<: *mujoco-defaults
+  batch_size: 128
+  buffer_size: 100000
+  gamma: 0.995
+  learning_rate: 0.00039981805535514633
+  learning_starts: 1000
+  policy_kwargs: dict(net_arch=[400, 300], log_std_init=-2.689958330139309)
+  tau: 0.01
+  train_freq: 256
+
 # === HER Robotics GoalEnvs ===
 FetchReach-v1:
diff --git a/hyperparams/td3.yml b/hyperparams/td3.yml
index 9b941516b..c29d49199 100644
--- a/hyperparams/td3.yml
+++ b/hyperparams/td3.yml
@@ -133,9 +133,15 @@ HalfCheetah-v3: &mujoco-defaults
   noise_type: 'normal'
   noise_std: 0.1
 
+seals/HalfCheetah-v0:
+  <<: *mujoco-defaults
+
 Ant-v3:
   <<: *mujoco-defaults
 
+seals/Ant-v0:
+  <<: *mujoco-defaults
+
 Hopper-v3:
   <<: *mujoco-defaults
   # SAC Hyperparams
@@ -144,9 +150,20 @@ Hopper-v3:
   learning_rate: !!float 3e-4
   batch_size: 256
 
+seals/Hopper-v0:
+  <<: *mujoco-defaults
+  # SAC Hyperparams
+  train_freq: 1
+  gradient_steps: 1
+  learning_rate: !!float 3e-4
+  batch_size: 256
+
 Walker2d-v3:
   <<: *mujoco-defaults
 
+seals/Walker2d-v0:
+  <<: *mujoco-defaults
+
 Humanoid-v3:
   <<: *mujoco-defaults
   n_timesteps: !!float 2e6
@@ -156,9 +173,24 @@ Humanoid-v3:
   learning_rate: !!float 3e-4
   batch_size: 256
 
+seals/Humanoid-v0:
+  <<: *mujoco-defaults
+  n_timesteps: !!float 2e6
+  # SAC Hyperparams
+  train_freq: 1
+  gradient_steps: 1
+  learning_rate: !!float 3e-4
+  batch_size: 256
+
 # Tuned
 Swimmer-v3:
   <<: *mujoco-defaults
   gamma: 0.9999
   train_freq: 1
   gradient_steps: 1
+
+seals/Swimmer-v0:
+  <<: *mujoco-defaults
+  gamma: 0.9999
+  train_freq: 1
+  gradient_steps: 1
diff --git a/hyperparams/tqc.yml b/hyperparams/tqc.yml
index d26289bef..1dd42fc88 100644
--- a/hyperparams/tqc.yml
+++ b/hyperparams/tqc.yml
@@ -145,24 +145,45 @@ HalfCheetah-v3: &mujoco-defaults
   policy: 'MlpPolicy'
   learning_starts: 10000
 
+seals/HalfCheetah-v0:
+  <<: *mujoco-defaults
+
 Ant-v3:
   <<: *mujoco-defaults
 
+seals/Ant-v0:
+  <<: *mujoco-defaults
+
 Hopper-v3:
   <<: *mujoco-defaults
   top_quantiles_to_drop_per_net: 5
 
+seals/Hopper-v0:
+  <<: *mujoco-defaults
+  top_quantiles_to_drop_per_net: 5
+
 Walker2d-v3:
   <<: *mujoco-defaults
 
+seals/Walker2d-v0:
+  <<: *mujoco-defaults
+
 Humanoid-v3:
   <<: *mujoco-defaults
   n_timesteps: !!float 2e6
 
+seals/Humanoid-v0:
+  <<: *mujoco-defaults
+  n_timesteps: !!float 2e6
+
 Swimmer-v3:
   <<: *mujoco-defaults
   gamma: 0.9999
 
+seals/Swimmer-v0:
+  <<: *mujoco-defaults
+  gamma: 0.9999
+
 # === HER Robotics GoalEnvs ===
 FetchReach-v1:
   env_wrapper: sb3_contrib.common.wrappers.TimeFeatureWrapper
diff --git a/hyperparams/trpo.yml b/hyperparams/trpo.yml
index c78263338..01fbfccba 100644
--- a/hyperparams/trpo.yml
+++ b/hyperparams/trpo.yml
@@ -11,6 +11,18 @@ CartPole-v1:
   learning_rate: !!float 1e-3
   n_critic_updates: 20
 
+seals/CartPole-v0:
+  n_envs: 2
+  n_timesteps: !!float 1e5
+  policy: 'MlpPolicy'
+  n_steps: 512
+  batch_size: 512
+  cg_damping: !!float 1e-3
+  gae_lambda: 0.98
+  gamma: 0.99
+  learning_rate: !!float 1e-3
+  n_critic_updates: 20
+
 # Tuned
 Pendulum-v1:
   n_envs: 2
@@ -60,6 +72,14 @@ MountainCar-v0:
   n_steps: 1024
   n_critic_updates: 20
 
+seals/MountainCar-v0:
+  normalize: true
+  n_envs: 2
+  n_timesteps: !!float 1e5
+  policy: 'MlpPolicy'
+  n_steps: 1024
+  n_critic_updates: 20
+
 # Tuned
 MountainCarContinuous-v0:
   normalize: True
@@ -122,25 +142,49 @@ Ant-v3: &mujoco-defaults
   <<: *pybullet-defaults
   n_timesteps: !!float 1e6
 
+seals/Ant-v0:
+  <<: *mujoco-defaults
+
 # Tuned
 HalfCheetah-v3:
   <<: *mujoco-defaults
   target_kl: 0.04
+
+seals/HalfCheetah-v0:
+  <<: *mujoco-defaults
+  target_kl: 0.04
+
 # Tuned
 Hopper-v3:
   <<: *mujoco-defaults
+
+seals/Hopper-v0:
+  <<: *mujoco-defaults
+
 # Tuned
 Walker2d-v3:
   <<: *mujoco-defaults
 
+seals/Walker2d-v0:
+  <<: *mujoco-defaults
+
 Humanoid-v3:
   <<: *mujoco-defaults
   n_timesteps: !!float 2e6
+
+seals/Humanoid-v0:
+  <<: *mujoco-defaults
+  n_timesteps: !!float 2e6
+
 # Tuned
 Swimmer-v3:
   <<: *mujoco-defaults
   gamma: 0.9999
 
+seals/Swimmer-v0:
+  <<: *mujoco-defaults
+  gamma: 0.9999
+
 # Tuned
 BipedalWalker-v3:
   <<: *mujoco-defaults
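
Note on the YAML layout used throughout this patch: every seals/* entry either repeats the hyperparameters of its Gym counterpart verbatim or inherits them through a YAML anchor and merge key (&mujoco-defaults / <<: *mujoco-defaults), overriding only the keys that differ, such as n_timesteps for the Humanoid variants. The Python sketch below is illustrative and not part of the patch; it assumes PyYAML is installed and shows how a loader resolves that inheritance:

    # Minimal sketch (assumes PyYAML): a merge key flattens the anchored
    # mapping into the referencing one; explicit keys win over merged keys.
    import yaml

    doc = """
    HalfCheetah-v3: &mujoco-defaults
      normalize: true
      n_timesteps: !!float 1e6
      policy: 'MlpPolicy'

    seals/HalfCheetah-v0:
      <<: *mujoco-defaults

    seals/Humanoid-v0:
      <<: *mujoco-defaults
      n_timesteps: !!float 2e6
    """

    params = yaml.safe_load(doc)
    assert params["seals/HalfCheetah-v0"]["n_timesteps"] == 1e6  # inherited
    assert params["seals/Humanoid-v0"]["n_timesteps"] == 2e6     # overridden
    assert params["seals/Humanoid-v0"]["policy"] == "MlpPolicy"  # inherited

In rl-baselines3-zoo these entries are looked up by environment id, e.g. `python train.py --algo ppo --env seals/CartPole-v0 --gym-packages seals` (assuming the seals package is installed; --gym-packages imports it so its environments are registered before the lookup).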