-
Notifications
You must be signed in to change notification settings - Fork 0
/
run_experiments.py
575 lines (445 loc) · 23.6 KB
/
run_experiments.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
"""
Main file to run experiments and show animation.
Note: To make the animation work in Spyder you should set graphics backend to 'Automatic' (Preferences > Graphics > Graphics Backend).
"""
#!/usr/bin/python
from random import randint
from random import shuffle
from math import inf
import matplotlib.pyplot as plt
import argparse
import glob
from pathlib import Path
from cbs import CBSSolver
from independent import IndependentSolver
from prioritized import PrioritizedPlanningSolver
from distributed import DistributedPlanningSolver # Placeholder for Distributed Planning
from visualize import Animation
from single_agent_planner import get_sum_of_cost
import pandas as pd
import time as timer
# Default solver for a single (non-statistical) run; can be overridden with
# the --solver command line option.
SOLVER = "Prioritized"
# Determine whether to perform statistical analysis or to perform a single simulation.
statistical_analysis = True
# Should values from the assignment.txt file be used or should start/goal locations
# be completely random? Note that statistical analysis is always fully random.
full_random = True
# CPU cut-off time passed to every solver constructor.
# NOTE(review): presumably seconds, and solvers presumably return None on
# timeout (add_to_database treats None paths as failure) — confirm in the
# solver implementations.
CPU_cutoff_time = 5
# Parameters for statistical analysis: how many random simulations to run for
# each agent count, and which agent counts to sweep over.
simulations_per_agent = 50
agent_range = range(2,11)
"""
Algorithms to consider for statistical analysis. These can easily be turned off/on by adding/removing "not" True
Computing all combined takes about 30 minutes ~ 1 hour to run. Quicker simulations can be performed by using the Prioritized or PrioritizedPlusSimple algorithms
as those are much faster. Additionally, the cpu cutoff time, or the amount of simulations can be lowered.
"""
#Prioritized = not True
Prioritized = True
PrioritizedPlusSimple = True
PrioritizedPlus = True
CBS = True
Distributed = True
# The order of `algorithms` must match the order of flags in `algorithms_used`;
# the two lists are zipped by index in the main loop.
algorithms = ["Prioritized", "PrioritizedPlus", "PrioritizedPlusSimple", "CBS", "Distributed"]
algorithms_used = [Prioritized, PrioritizedPlus, PrioritizedPlusSimple, CBS, Distributed]
# Fixed plot color per algorithm so all four plots use a consistent legend.
algorithm_colors = {"Prioritized": 'blue',
                    "PrioritizedPlus": 'yellow',
                    "PrioritizedPlusSimple": 'green',
                    "CBS": 'orange',
                    "Distributed": 'red'}
def print_mapf_instance(my_map, starts, goals):
    """
    Print the start and goal locations of all agents, using @ for an obstacle,
    . for an open cell, and a number for the location of each agent.
    Example:
    @ @ @ @ @ @ @
    @ 0 1 . . . @
    @ @ @ . @ @ @
    @ @ @ @ @ @ @
    """
    # Render the same map twice: once with start cells, once with goal cells.
    for heading, cells in (('Start locations', starts), ('Goal locations', goals)):
        print(heading)
        print_locations(my_map, cells)
def print_locations(my_map, locations):
    """
    Render the map as text: agent index for an occupied cell, @ for an
    obstacle, . for a free cell. See the docstring of print_mapf_instance.
    """
    # Map each (row, col) coordinate to the index of the agent occupying it.
    # Later agents overwrite earlier ones on a clash, matching the overwrite
    # behavior of an index-assignment grid.
    occupied = {}
    for idx, coord in enumerate(locations):
        occupied[coord] = idx
    rows = []
    for row in range(len(my_map)):
        pieces = []
        for col in range(len(my_map[0])):
            if (row, col) in occupied:
                pieces.append(str(occupied[(row, col)]) + ' ')
            elif my_map[row][col]:
                pieces.append('@ ')
            else:
                pieces.append('. ')
        rows.append(''.join(pieces))
    # One trailing newline per row, plus the newline print() itself adds.
    print('\n'.join(rows) + '\n')
def random_start(number_of_agents, start_locs=None, goal_locs=None):
    """
    Spawn agents randomly in the first two columns and assign each a goal at
    the mirrored position on the opposite side of the map.

    A start cell is drawn from rows 0-8 of columns 0-1; the matching goal is
    the point-mirrored cell (8 - y, 21 - x). If the drawn start cell or its
    mirrored goal is already taken, a new cell is drawn (rejection sampling).

    Args:
        number_of_agents: how many new start/goal pairs to generate.
        start_locs: optional list of already-used start cells to extend.
        goal_locs: optional list of already-used goal cells to extend.

    Returns:
        (start_locs, goal_locs): the (possibly extended) coordinate lists.

    Raises:
        ValueError: if more agents are requested than free start cells remain
            (there are only 9 * 2 = 18 candidates), which would previously
            have made the rejection loop spin forever.
    """
    if start_locs is None:
        start_locs = []
    if goal_locs is None:
        goal_locs = []
    # Guard the rejection loop: 9 rows x 2 columns = 18 candidate start cells.
    if len(start_locs) + number_of_agents > 18:
        raise ValueError(
            "cannot place %d additional agents: only %d free start cells left"
            % (number_of_agents, 18 - len(start_locs)))
    for _ in range(number_of_agents):
        while True:
            y_start = randint(0, 8)
            x_start = randint(0, 1)
            valuepair_start = (y_start, x_start)
            # Goal mirrors the start on the opposite side of the 9x22 map.
            valuepair_goal = (8 - y_start, 21 - x_start)
            if valuepair_goal not in goal_locs and valuepair_start not in start_locs:
                goal_locs.append(valuepair_goal)
                start_locs.append(valuepair_start)
                break
    return start_locs, goal_locs
def import_mapf_instance(filename):
    """
    Imports mapf instance from instances folder. Expects input as a .txt file in the following format:
    Line1: #rows #columns (number of rows and columns)
    Line2-X: Grid of @ and . symbols with format #rows * #columns. The @ indicates an obstacle, whereas . indicates free cell.
    Line X: #agents (number of agents)
    Line X+1..X+n: xCoordStart yCoordStart xCoordGoal yCoordGoal (one start/goal pair per agent)
    Example:
        4 7             # grid with 4 rows and 7 columns
        @ @ @ @ @ @ @   # example row with obstacle in every column
        @ . . . . . @   # example row with 5 free cells in the middle
        @ @ @ . @ @ @
        @ @ @ @ @ @ @
        2               # 2 agents in this experiment
        1 1 1 5         # agent 1 starts at (1,1) and has (1,5) as goal
        1 2 1 4         # agent 2 starts at (1,2) and has (1,4) as goal

    Returns:
        (my_map, starts, goals): my_map is a list of rows of booleans
        (True = obstacle), starts/goals are lists of (x, y) tuples.

    Raises:
        FileNotFoundError: if `filename` does not exist.
    """
    path = Path(filename)
    if not path.is_file():
        # FileNotFoundError is more specific than the original BaseException
        # and still a subclass of it, so existing handlers keep working.
        raise FileNotFoundError(str(filename) + " does not exist.")
    # `with` guarantees the file is closed even if parsing raises.
    with open(path, 'r') as f:
        # First line: "#rows #columns". split() tolerates extra whitespace.
        rows, columns = [int(x) for x in f.readline().split()]
        # Next #rows lines: the grid. Only '@' and '.' are map cells.
        my_map = []
        for _ in range(rows):
            line = f.readline()
            my_map.append([cell == '@' for cell in line if cell in '@.'])
        # Next line: number of agents.
        num_agents = int(f.readline())
        starts = []
        goals = []
        for a in range(num_agents):
            line = f.readline()
            # Any agents which do not have a start and goal location are
            # given these randomly.
            if not line:
                starts, goals = random_start(num_agents - a, starts, goals)
                break
            sx, sy, gx, gy = [int(x) for x in line.split()]
            starts.append((sx, sy))
            goals.append((gx, gy))
    return my_map, starts, goals
def cost_plot(dfs):
    """
    Plot the per-agent cost against the number of agents for each algorithm.

    For every algorithm's dataframe the rows are grouped by agent count and
    the min / max / mean cost per group are drawn: min and max as faint
    lines with a shaded band between them, the mean as the labelled line.
    """
    plt.figure()
    #plt.title('Total costs per number of agents ')
    plt.xlabel('Agents [-]')
    plt.ylabel('Average cost [-]')
    # Track the overall extremes across algorithms so the y-axis can be
    # fitted to the data after all series are drawn.
    current_cost_max = 0
    current_cost_min = inf
    for df_key in dfs.keys():
        color = algorithm_colors[df_key]
        # Faint envelope lines (no legend label), solid mean line (labelled).
        dfs[df_key].groupby('agents')['costs'].max().plot(color = color, label="", alpha=0.35)
        dfs[df_key].groupby('agents')['costs'].min().plot(color = color, label="", alpha=0.35)
        dfs[df_key].groupby('agents')['costs'].mean().plot(color = color, label=df_key, ylim = 0)
        #plt.legend(['maximum', 'minimum' , 'mean'])
        costs_max = dfs[df_key].groupby('agents')['costs'].max()
        costs_min = dfs[df_key].groupby('agents')['costs'].min()
        # rename_axis(index=None) strips the 'agents' index name before
        # reducing the series to scalar extremes.
        cost_max = costs_max.rename_axis(index=None).max()
        cost_min = costs_min.rename_axis(index=None).min()
        current_cost_max = max(cost_max, current_cost_max)
        current_cost_min = min(cost_min, current_cost_min)
        # Shade the min-max band for this algorithm.
        plt.fill_between(agent_range,costs_max, costs_min, color=color, alpha=0.25)
    plt.legend()
    # 5% headroom above the largest observed cost.
    plt.ylim(0, 1.05*current_cost_max)
    #plt.fill_between(costs_max,costs_min)
    #successrate = df.groupby('agents')['success'].mean()
    #finishtimes = df.groupby('agents')['finish times'].mean()
    plt.show()
def succes_plot(dfs):
    """
    Plot the success rate against the number of agents for each algorithm.

    Successes are stored as booleans (1/0), so the per-group mean of the
    'success' column is directly the success rate.
    """
    # NOTE: function name keeps the historical spelling; callers use it.
    plt.figure()
    #plt.title('Success rate per number of agents')
    plt.xlabel('Agents [-]')
    plt.ylabel('Success rate [-]')
    for algo_name in dfs.keys():
        frame = dfs[algo_name]
        rate_per_agents = frame.groupby('agents')['success'].mean()
        rate_per_agents.plot(label=algo_name, color=algorithm_colors[algo_name], ylim = [0,1.2])
    plt.legend()
    plt.show()
def finish_times_plot(dfs):
    """
    The finish times are defined as the time it takes for all the agents to reach their goal location.(The total set-up time)
    This value is found by taking the maximum path length of each solution. The maximum, minimum, and mean values for each number of agents are plotted.
    """
    plt.figure()
    #plt.title('Finish times per number of agents')
    plt.xlabel('Agents [-]')
    plt.ylabel('Finish time [-]')
    # Track the overall extremes across algorithms for the final y-limit.
    current_finish_time_max = 0
    current_finish_time_min = inf
    for df_key in dfs.keys():
        color = algorithm_colors[df_key]
        # Faint envelope lines (no legend label), solid mean line (labelled).
        dfs[df_key].groupby('agents')['finish times'].max().plot(color=color, label="", alpha=0.35)
        dfs[df_key].groupby('agents')['finish times'].min().plot(color=color, label="", alpha=0.35)
        dfs[df_key].groupby('agents')['finish times'].mean().plot(color=color, label=df_key, ylim = 0)
        finish_times_max = dfs[df_key].groupby('agents')['finish times'].max()
        finish_times_min = dfs[df_key].groupby('agents')['finish times'].min()
        # Strip the 'agents' index name before reducing to scalar extremes.
        finish_time_max = finish_times_max.rename_axis(index=None).max()
        finish_time_min = finish_times_min.rename_axis(index=None).min()
        current_finish_time_max = max(finish_time_max, current_finish_time_max)
        current_finish_time_min = min(finish_time_min, current_finish_time_min)
        # Shade the min-max band for this algorithm.
        plt.fill_between(agent_range,finish_times_max, finish_times_min, color=color, alpha=0.25)
    plt.legend()
    # 5% headroom above the largest observed finish time.
    plt.ylim(0, 1.05*current_finish_time_max)
    plt.show()
def cpu_times_plot(dfs):
    """
    Plot the mean CPU time against the number of agents for each algorithm.

    The CPU time of a run is the wall-clock difference between solver start
    and finish. Timed-out runs were stored as None, so only the CPU times of
    valid solutions contribute to the plotted means.
    """
    plt.figure()
    plt.xlabel('Agents [-]')
    plt.ylabel('CPU time [s]')
    for algo_name in dfs.keys():
        mean_cpu = dfs[algo_name].groupby('agents')['cpu_times'].mean()
        mean_cpu.plot(color=algorithm_colors[algo_name], label=algo_name)
    plt.legend()
    plt.show()
def add_to_database(algorithm, paths, cpu_time):
    """
    Record the outcome of one simulation in the module-level result dicts.

    If `paths` is a valid solution, stores the per-agent cost, the finish
    time and the CPU time, and marks the run successful. If the solver
    failed (`paths` is None), stores None placeholders so the failed run
    still counts towards the success rate.

    Relies on the module-level dicts `costs`, `finish_times`, `success`,
    `cpu_times` and the current `agents` count set by the main loop.

    Args:
        algorithm: key identifying the algorithm (e.g. "CBS").
        paths: list of per-agent paths, or None when no solution was found.
        cpu_time: elapsed solver time in seconds.
    """
    if paths is not None:
        costs[algorithm].append(get_sum_of_cost(paths)/agents)
        # Finish time = length of the longest single-agent path. The old
        # len(max(paths)) picked the lexicographically largest path, whose
        # length is not necessarily the maximum.
        finish_times[algorithm].append(max(len(path) for path in paths))
        success[algorithm].append(True)
        cpu_times[algorithm].append(cpu_time)
    else:
        costs[algorithm].append(None)
        finish_times[algorithm].append(None)
        success[algorithm].append(False)
        cpu_times[algorithm].append(None)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Runs various MAPF algorithms')
    # NOTE(review): default=None makes glob.glob(args.instance) raise a
    # TypeError when --instance is omitted — confirm a pattern is always given.
    parser.add_argument('--instance', type=str, default=None,
                        help='The name of the instance file(s)')
    parser.add_argument('--batch', action='store_true', default=False,
                        help='Use batch output instead of animation')
    parser.add_argument('--disjoint', action='store_true', default=False,
                        help='Use the disjoint splitting')
    parser.add_argument('--solver', type=str, default=SOLVER,
                        help='The solver to use (one of: {CBS,Independent,Prioritized}), defaults to ' + str(SOLVER))
    args = parser.parse_args()
    # Hint: Command line options can be added in Spyder by pressing CTRL + F6 > Command line options.
    # In PyCharm, they can be added as parameters in the configuration.

    # Line-buffered so results appear on disk while runs are in progress.
    result_file = open("results.csv", "w", buffering=1)

    # --- Single-simulation mode: one solver, optional animation. ---
    if not statistical_analysis:
        for file in sorted(glob.glob(args.instance)):
            print("***Import an instance***")
            my_map, starts, goals = import_mapf_instance(file)
            if full_random:
                # Replace the file's start/goal locations with random ones,
                # keeping the same number of agents.
                starts, goals = random_start(len(starts))
            print_mapf_instance(my_map, starts, goals)
            if args.solver == "CBS":
                print("***Run CBS***")
                cbs = CBSSolver(my_map, starts, goals, CPU_cutoff_time)
                paths = cbs.find_solution(args.disjoint)
            elif args.solver == "Independent":
                print("***Run Independent***")
                solver = IndependentSolver(my_map, starts, goals)
                paths = solver.find_solution()
            elif args.solver == "Prioritized":
                print("***Run Prioritized***")
                solver = PrioritizedPlanningSolver(my_map, starts, goals, CPU_cutoff_time)
                paths = solver.find_solution()
            elif args.solver == "Distributed":  # Wrapper of distributed planning solver class
                print("***Run Distributed Planning***")
                solver = DistributedPlanningSolver(my_map, starts, goals, CPU_cutoff_time)
                paths = solver.find_solution()
            else:
                raise RuntimeError("Unknown solver!")
            cost = get_sum_of_cost(paths)
            result_file.write("{},{}\n".format(file, cost))
            if not args.batch:
                print("***Test paths on a simulation***")
                animation = Animation(my_map, starts, goals, paths)
                # animation.save("output.mp4", 1.0) # install ffmpeg package to use this option
                animation.show()

    # --- Statistical-analysis mode: sweep agent counts, many random runs. ---
    if statistical_analysis:
        for file in sorted(glob.glob(args.instance)):
            print("***Import an instance***")
            my_map, starts, goals = import_mapf_instance(file)
            print_mapf_instance(my_map, starts, goals)
            # These dicts are module-level state read/written by
            # add_to_database(); one list of results per enabled algorithm.
            costs = dict()
            finish_times = dict()
            success = dict()
            cpu_times = dict()
            for i, algorithm in enumerate(algorithms):
                if not algorithms_used[i]:
                    continue
                #print(algorithm)
                costs[algorithm] = []
                finish_times[algorithm] = []
                success[algorithm] = []
                cpu_times[algorithm] = []
            # One entry per simulation, shared by all algorithms' dataframes.
            nums_of_agents = []
            # `agents` is also read by add_to_database() to normalize costs.
            for agents in agent_range:
                for simulations in range(simulations_per_agent):
                    print("Iteration", simulations, "for", agents, "agents. Total progress:",
                          round(((agents-min(agent_range))*simulations_per_agent+simulations)/
                                (len(agent_range)*simulations_per_agent)*100,1), "%")
                    nums_of_agents.append(agents)
                    # Re-import the map, then randomize start/goal locations
                    # for the current agent count (file's own agents ignored).
                    my_map, starts, goals = import_mapf_instance(file)
                    starts, goals = random_start(agents)
                    if CBS:
                        start_time = timer.time()
                        #print("***Run CBS***")
                        cbs = CBSSolver(my_map, starts, goals, CPU_cutoff_time)
                        paths = cbs.find_solution(args.disjoint)
                        cpu_time = timer.time() - start_time
                        add_to_database("CBS", paths, cpu_time)
                    if Prioritized:
                        start_time = timer.time()
                        solver = PrioritizedPlanningSolver(my_map, starts, goals, CPU_cutoff_time)
                        paths = solver.find_solution()
                        cpu_time = timer.time() - start_time
                        add_to_database("Prioritized", paths,cpu_time)
                    if PrioritizedPlusSimple:
                        """
                        This algorithm functions for its first iteration similarily to the prioritized method: it tries to find a solution for each
                        agent in the given order of priority. It is then checked whether this yields a valid solution. If not, the starts and goals are then shuffled in the same way
                        (This needs to be done simultaneously, because if the starts and goals were shuffled seperately, then also the goal location of each agent would change)
                        """
                        start_time = timer.time()
                        paths = None
                        # NOTE(review): if no priority ordering admits a
                        # solution this loop never terminates — presumably the
                        # CPU cutoff inside the solver prevents that; confirm.
                        while paths == None:
                            solver = PrioritizedPlanningSolver(my_map, starts, goals, CPU_cutoff_time)
                            paths = solver.find_solution()
                            # Reorder start-goal pairs.
                            temp = list(zip(starts, goals))
                            shuffle(temp)
                            res1, res2 = zip(*temp)
                            starts, goals = list(res1), list(res2)
                        cpu_time = timer.time() - start_time
                        add_to_database("PrioritizedPlusSimple", paths, cpu_time)
                    if PrioritizedPlus:
                        """
                        The prioritizedPlus algorithm tries to find a solution using the distributed method. When this fails to find a solution, the "PrioritizedPlusSimple"
                        method will yield a valid solution. Right now both are calculated and the lowest cost path is chosen; This was done because the distributed algorithm sometimes
                        yield very high cost solutions. Whence the distributed algorithm gets improved we can let it try to find a (hopefully near-optimal) solution, and only
                        once no solution can be found that the centralized prioritized method takes over.
                        """
                        start_time = timer.time()
                        """
                        Here also the CBS solver could also be used: this actually yields lower cost solutions for a small number of agents. However, the ultimate goal is
                        is to improve upon the distributed method such that it will also gives efficient solutions. Then, the hybrid approach makes more sense to use. It is even possible
                        that the distributed algorithm becomes so effective that it will álways find a solution, at which point the prioritized part can be turned off.
                        However, this is hard to guarantee, and thus the prioritized part will act as a safety net for the near-future, in which the distributed algorithm is still being developed.
                        """
                        solver = DistributedPlanningSolver(my_map, starts, goals, CPU_cutoff_time)
                        paths1 = solver.find_solution()
                        paths2 = None
                        # NOTE(review): this guard is always true here since
                        # paths2 was just set to None.
                        if paths2 == None:
                            while paths2 == None:
                                solver = PrioritizedPlanningSolver(my_map, starts, goals, CPU_cutoff_time)
                                paths2 = solver.find_solution()
                                # Shuffle two lists with same order
                                # Using zip() + * operator + shuffle()
                                temp = list(zip(starts, goals))
                                shuffle(temp)
                                res1, res2 = zip(*temp)
                                starts, goals = list(res1), list(res2)
                        # Keep the cheaper of the two solutions (or whichever
                        # one exists).
                        if paths1 == None and not paths2 == None:
                            paths = paths2
                        elif not paths1 == None and not paths2 == None:
                            #paths = min(len(paths1), paths2) this was a faulty approach!
                            if get_sum_of_cost(paths1) > get_sum_of_cost(paths2):
                                paths = paths2
                            else:
                                paths = paths1
                        elif not paths1 == None and paths2 == None:
                            paths = paths1
                        else:
                            paths = None
                        # if get_sum_of_cost(paths) > 400:
                        #     print(paths1)
                        #     print("paths2", paths2)
                        # Some simulations would be very inefficient, this condition visualizes those edge cases
                        # if not paths == None:
                        #     if get_sum_of_cost(paths) > 400:
                        #         animation = Animation(my_map, starts, goals, paths)
                        #         animation.show()
                        cpu_time = timer.time() - start_time
                        add_to_database("PrioritizedPlus", paths, cpu_time)
                    if Distributed:
                        start_time = timer.time()
                        #print("***Run Distributed Planning***")
                        solver = DistributedPlanningSolver(my_map, starts, goals, CPU_cutoff_time)
                        paths = solver.find_solution()
                        cpu_time = timer.time() - start_time
                        add_to_database("Distributed", paths, cpu_time)
            #print(costs, finish_times, success, cpu_times, nums_of_agents)
            # Collect per-algorithm results into dataframes, save them to CSV
            # and generate the four summary plots.
            dfs = dict()
            for i, algorithm in enumerate(algorithms):
                if not algorithms_used[i]:
                    continue
                d = {'costs': costs[algorithm],
                     'finish times': finish_times[algorithm],
                     'success': success[algorithm],
                     'cpu_times': cpu_times[algorithm],
                     'agents': nums_of_agents
                     }
                dfs[algorithm] = pd.DataFrame(data = d)
                # NOTE(review): assumes an "analyses" directory exists next to
                # the script — to_csv will fail otherwise; confirm.
                dfs[algorithm].to_csv("analyses/L3-" + algorithm + ".csv")
            cost_plot(dfs)
            succes_plot(dfs)
            finish_times_plot(dfs)
            cpu_times_plot(dfs)
            # Animation of the last simulation is disabled by default.
            animate = False
            if animate and not args.batch:
                print("***Test paths on a simulation***")
                animation = Animation(my_map, starts, goals, paths)
                # animation.save("output.mp4", 1.0) # install ffmpeg package to use this option
                animation.show()
    result_file.close()