 40 |  40 |   "\n",
 41 |  41 |   "for log_file in experiment_log_files:\n",
 42 |  42 |   "    d = dict()\n",
 43 |     | - "    \n",
 44 |     | - "    d['logfile'] = log_file\n",
 45 |     | - "    \n",
 46 |     | - "    study = optuna.load_study(storage=optuna.storages.JournalStorage(\n",
    |  43 | + "\n",
    |  44 | + "    d[\"logfile\"] = log_file\n",
    |  45 | + "\n",
    |  46 | + "    study = optuna.load_study(\n",
    |  47 | + "        storage=optuna.storages.JournalStorage(\n",
 47 |  48 |   "            optuna.storages.JournalFileStorage(str(log_file))\n",
 48 |  49 |   "        ),\n",
 49 |  50 |   "        # in our case, we have one journal file per study so the study name can be\n",
 50 |  51 |   "        # inferred\n",
 51 |  52 |   "        study_name=None,\n",
 52 |  53 |   "    )\n",
 53 |     | - "    d['study'] = study\n",
 54 |     | - "    d['study_name'] = study.study_name\n",
 55 |     | - "    \n",
    |  54 | + "    d[\"study\"] = study\n",
    |  55 | + "    d[\"study_name\"] = study.study_name\n",
    |  56 | + "\n",
 56 |  57 |   "    trial_state_counter = Counter(t.state for t in study.trials)\n",
 57 |  58 |   "    n_completed_trials = trial_state_counter[TrialState.COMPLETE]\n",
 58 |     | - "    d['trials'] = n_completed_trials\n",
 59 |     | - "    d['trials_running'] = Counter(t.state for t in study.trials)[TrialState.RUNNING]\n",
 60 |     | - "    d['trials_failed'] = Counter(t.state for t in study.trials)[TrialState.FAIL]\n",
 61 |     | - "    d['all_trials'] = len(study.trials)\n",
 62 |     | - "    \n",
    |  59 | + "    d[\"trials\"] = n_completed_trials\n",
    |  60 | + "    d[\"trials_running\"] = Counter(t.state for t in study.trials)[TrialState.RUNNING]\n",
    |  61 | + "    d[\"trials_failed\"] = Counter(t.state for t in study.trials)[TrialState.FAIL]\n",
    |  62 | + "    d[\"all_trials\"] = len(study.trials)\n",
    |  63 | + "\n",
 63 |  64 |   "    if n_completed_trials > 0:\n",
 64 |     | - "        d['best_value'] = round(study.best_trial.value, 2)\n",
 65 |     | - "    \n",
    |  65 | + "        d[\"best_value\"] = round(study.best_trial.value, 2)\n",
    |  66 | + "\n",
 66 |  67 |   "    assert \"_\" in study.study_name\n",
 67 |     | - "    study_segments = study.study_name.split(\"_\") \n",
    |  68 | + "    study_segments = study.study_name.split(\"_\")\n",
 68 |  69 |   "    assert len(study_segments) > 3\n",
 69 |  70 |   "    tuning, algo, with_ = study_segments[:3]\n",
 70 |  71 |   "    assert (tuning, with_) == (\"tuning\", \"with\")\n",
 71 |     | - "    \n",
 72 |     | - "    d['algo'] = algo\n",
 73 |     | - "    d['env'] = \"_\".join(study_segments[3:])\n",
 74 |     | - "    d['best_trial_duration'] = study.best_trial.duration\n",
 75 |     | - "    d['mean_duration'] = sum([t.duration for t in study.trials if t.state == TrialState.COMPLETE], datetime.timedelta())/n_completed_trials\n",
 76 |     | - "    \n",
    |  72 | + "\n",
    |  73 | + "    d[\"algo\"] = algo\n",
    |  74 | + "    d[\"env\"] = \"_\".join(study_segments[3:])\n",
    |  75 | + "    d[\"best_trial_duration\"] = study.best_trial.duration\n",
    |  76 | + "    d[\"mean_duration\"] = (\n",
    |  77 | + "        sum(\n",
    |  78 | + "            [t.duration for t in study.trials if t.state == TrialState.COMPLETE],\n",
    |  79 | + "            datetime.timedelta(),\n",
    |  80 | + "        )\n",
    |  81 | + "        / n_completed_trials\n",
    |  82 | + "    )\n",
    |  83 | + "\n",
 77 |  84 |   "    reruns_folder = log_file.parent / \"reruns\"\n",
 78 |     | - "    rerun_results = [round(run['result']['imit_stats']['monitor_return_mean'], 2)\n",
 79 |     | - "                     for conf, run in sfp.find_sacred_runs(reruns_folder, only_completed_runs=True)]\n",
 80 |     | - "    d['rerun_values'] = rerun_results\n",
 81 |     | - "    \n",
    |  85 | + "    rerun_results = [\n",
    |  86 | + "        round(run[\"result\"][\"imit_stats\"][\"monitor_return_mean\"], 2)\n",
    |  87 | + "        for conf, run in sfp.find_sacred_runs(reruns_folder, only_completed_runs=True)\n",
    |  88 | + "    ]\n",
    |  89 | + "    d[\"rerun_values\"] = rerun_results\n",
    |  90 | + "\n",
 82 |  91 |   "    raw_study_data.append(d)\n",
 83 |     | - "    \n",
    |  92 | + "\n",
 84 |  93 |   "study_data = pd.DataFrame(raw_study_data)"
 85 |  94 |   ]
 86 |  95 |   },

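For context, this cell boils down to the following standalone sketch (not part of the diff). The path `journal.log` is a placeholder; `study_name=None` only works because each journal file holds a single study, and seeding `sum()` with `datetime.timedelta()` is what makes the trial durations addable.

```python
# Hypothetical sketch of the pattern above; "journal.log" is a placeholder path.
import datetime
from collections import Counter

import optuna
from optuna.trial import TrialState

study = optuna.load_study(
    storage=optuna.storages.JournalStorage(
        optuna.storages.JournalFileStorage("journal.log")
    ),
    study_name=None,  # unambiguous when the storage holds exactly one study
)

states = Counter(t.state for t in study.trials)
n_completed = states[TrialState.COMPLETE]

if n_completed > 0:
    # sum() needs an explicit timedelta start value: its default start of 0
    # cannot be added to datetime.timedelta objects.
    mean_duration = (
        sum(
            (t.duration for t in study.trials if t.state == TrialState.COMPLETE),
            datetime.timedelta(),
        )
        / n_completed
    )
    print(f"{n_completed} completed trials, mean duration {mean_duration}")
```
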
103 | 112 |   "    \"seals_humanoid\",\n",
104 | 113 |   "    \"seals_cartpole\",\n",
105 | 114 |   "    \"pendulum\",\n",
106 |     | - "    \"seals_mountain_car\"\n",
    | 115 | + "    \"seals_mountain_car\",\n",
107 | 116 |   "]\n",
108 | 117 |   "\n",
109 | 118 |   "pc_paper_700 = dict(\n",

163 | 172 |   "    for env, value in values_by_env.items():\n",
164 | 173 |   "        if value == \"-\":\n",
165 | 174 |   "            continue\n",
166 |     | - "        raw_study_data.append(dict(\n",
167 |     | - "            algo=algo,\n",
168 |     | - "            env=env,\n",
169 |     | - "            best_value=value,\n",
170 |     | - "        ))\n",
171 |     | - "    \n",
    | 175 | + "        raw_study_data.append(\n",
    | 176 | + "            dict(\n",
    | 177 | + "                algo=algo,\n",
    | 178 | + "                env=env,\n",
    | 179 | + "                best_value=value,\n",
    | 180 | + "            )\n",
    | 181 | + "        )\n",
    | 182 | + "\n",
172 | 183 |   "study_data = pd.DataFrame(raw_study_data)"
173 | 184 |   ]
174 | 185 |   },

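This cell appends hand-transcribed paper baselines to the same `raw_study_data` list used for the tuned studies. A minimal sketch of that step, assuming a `pc_paper_700`-style mapping and an outer loop over algorithms (the numbers below are placeholders, not values from the paper):

```python
# Hypothetical sketch, not part of this diff; all numbers are placeholders.
import pandas as pd

raw_study_data = []
# Assumed structure: algo -> {env: best_value}, with "-" marking
# algo/env pairs for which no value was reported.
baselines = {"bc": {"pendulum": -200.0, "seals_cartpole": "-"}}

for algo, values_by_env in baselines.items():
    for env, value in values_by_env.items():
        if value == "-":
            continue
        raw_study_data.append(dict(algo=algo, env=env, best_value=value))

study_data = pd.DataFrame(raw_study_data)
print(study_data)
```
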
185 | 196 |   "display(study_data[[\"algo\", \"env\", \"best_value\"]])\n",
186 | 197 |   "\n",
187 | 198 |   "print(\"Rerun Data\")\n",
188 |     | - "display(study_data[[\"algo\", \"env\", \"best_value\", \"rerun_values\"]][study_data[\"rerun_values\"].map(np.std) > 0])"
    | 199 | + "display(\n",
    | 200 | + "    study_data[[\"algo\", \"env\", \"best_value\", \"rerun_values\"]][\n",
    | 201 | + "        study_data[\"rerun_values\"].map(np.std) > 0\n",
    | 202 | + "    ]\n",
    | 203 | + ")"
189 | 204 |   ]
190 | 205 |   }
191 | 206 |   ],
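The final `display` call filters with a boolean mask: mapping `np.std` over the `rerun_values` lists keeps only rows whose rerun returns actually vary. A toy illustration of the same mask (hypothetical data, not from the experiments):

```python
# Hypothetical sketch, not part of this diff; toy data only.
import numpy as np
import pandas as pd

study_data = pd.DataFrame(
    {
        "algo": ["bc", "dagger"],
        "env": ["pendulum", "seals_cartpole"],
        "rerun_values": [[500.0, 500.0], [480.0, 510.0]],
    }
)

# np.std of a constant list is 0, so the mask drops rows whose
# reruns all produced the same return.
print(study_data[study_data["rerun_values"].map(np.std) > 0])
```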