
Commit 88a4693

Author: Bastien Vallet (authored and committed)

[ruff] Enable extra plugins

1 parent: 2c941d5


60 files changed, +241 −225 lines
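The diffs below are consistent with enabling several ruff plugin families and applying their autofixes: flake8-simplify (SIM), flake8-implicit-str-concat (ISC), flake8-tidy-imports (TID), flake8-pytest-style (PT), perflint (PERF), and the ruff-specific RUF rules. This mapping is inferred from the fixes themselves; the commit message only says "extra plugins". Short, hedged sketches of each recurring pattern follow the relevant files.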

csv/convert.py (+3 −3)

@@ -58,8 +58,8 @@ def create_csv_power(out_file: pathlib.Path, data):
         job_name = result.get("job_name", "")
         job_number = result.get("job_number", "")
         monitoring = result.get("monitoring", {})
-        for category in monitoring.keys():
-            for typ in monitoring[category].keys():
+        for category in monitoring:
+            for typ in monitoring[category]:
                 measures = monitoring[category][typ]
                 events = measures.get("events", [])
                 unit = measures.get("unit")

@@ -108,7 +108,7 @@ def print_memrates(out, results):
         job_name = result.get("job_name", "")
         job_number = result.get("job_number", "")
         workers = result.get("workers")
-        for key in result.keys():
+        for key in result:
            if isinstance(result[key], dict) and "sum_speed" in result[key]:
                result_list.append(
                    {
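The .keys() removals above match ruff's flake8-simplify rule SIM118 (in-dict-keys): iteration and membership tests on a dict already operate on its keys, so calling .keys() only builds a redundant view. A minimal sketch of the pattern, with hypothetical stand-in data:

monitoring = {"power": {"psu1": [120.0]}, "thermal": {"cpu1": [65.0]}}

# After the fix: dicts iterate over their keys directly,
# so "for category in monitoring.keys()" becomes:
for category in monitoring:
    for typ in monitoring[category]:
        print(category, typ, monitoring[category][typ])

# Membership tests are equivalent too: no .keys() needed
assert "power" in monitoring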

graph/chassis.py (+2 −2)

@@ -107,7 +107,7 @@ def get_marker(category: PowerCategories) -> str:
         "Time [seconds]",
         y_label,
         outdir,
-        f"time_watt_{base_outfile}_by_{str(graph_type)}",
+        f"time_watt_{base_outfile}_by_{graph_type!s}",
     )

     if graph_type == PowerCategories.SERVERINCHASSIS:

@@ -126,7 +126,7 @@ def get_marker(category: PowerCategories) -> str:
         y_serie = np.array(sum_serie_to_plot[str(component)])[order]
         curve_label = str(component)
         if component in [PowerCategories.SERVER, PowerCategories.SERVERINCHASSIS]:
-            curve_label = f"sum of {str(component)}"
+            curve_label = f"sum of {component!s}"
         graph.get_ax().plot(x_serie, y_serie, "", label=curve_label, marker=get_marker(component))

     for trace in args.traces:
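Replacing str(x) inside f-strings with the !s conversion flag matches ruff's RUF010 (explicit-f-string-type-conversion); both spellings render identically, !s simply drops the nested call. A small sketch, using a hypothetical stand-in for the real PowerCategories enum:

from enum import Enum

class PowerCategories(Enum):  # hypothetical stand-in for hwbench's enum
    SERVER = "server"

    def __str__(self) -> str:
        return self.value

graph_type = PowerCategories.SERVER
assert f"by_{str(graph_type)}" == f"by_{graph_type!s}" == "by_server"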

graph/graph.py (+7 −7)

@@ -215,22 +215,22 @@ def generic_graph(

     components = bench.get_all_metrics(component_type, filter)
     if not len(components):
-        title = f"{item_title}: no {str(component_type)} metric found"
+        title = f"{item_title}: no {component_type!s} metric found"
         if filter:
             title += f" with filter = '{filter}'"
         return 0

     samples_count = bench.get_samples_count()
     unit = bench.get_metric_unit(component_type)
-    title = f'{item_title} during "{bench.get_bench_name()}" benchmark job\n' f"{args.title}\n" f"\n Stressor: "
+    title = f'{item_title} during "{bench.get_bench_name()}" benchmark job\n{args.title}\n\n Stressor: '
     title += f"{bench.workers()} x {bench.get_title_engine_name()} for {bench.duration()} seconds"
     title += f"\n{bench.get_system_title()}"
     graph = Graph(
         args,
         title,
         "Time [seconds]",
         unit,
-        output_dir.joinpath(f"{trace.get_name()}/{bench.get_bench_name()}/{str(component_type)}"),
+        output_dir.joinpath(f"{trace.get_name()}/{bench.get_bench_name()}/{component_type!s}"),
         outfile,
         show_source_file=trace,
     )

@@ -245,7 +245,7 @@ def generic_graph(

     if args.verbose:
         print(
-            f"{trace.get_name()}/{bench.get_bench_name()}: {len(components)} {str(component_type)} to graph with {samples_count} samples"
+            f"{trace.get_name()}/{bench.get_bench_name()}: {len(components)} {component_type!s} to graph with {samples_count} samples"
         )

     time_serie = []

@@ -280,7 +280,7 @@ def generic_graph(
             data_serie[component.get_full_name()].append(component.get_mean()[sample])

     if second_axis:
-        for _, entry in bench.get_monitoring_metric(second_axis).items():
+        for entry in bench.get_monitoring_metric(second_axis).values():
             for sensor, measure in entry.items():
                 # We don't plot the Cores here
                 # We don't plot sensor on y2 if already plot on y1

@@ -366,7 +366,7 @@ def yerr_graph(
         )
         data_serie[MEAN].append(mean_value)

-    title = f'{prefix}{component.get_name()} during "{bench.get_bench_name()}" benchmark job\n' f"\n Stressor: "
+    title = f'{prefix}{component.get_name()} during "{bench.get_bench_name()}" benchmark job\n\n Stressor: '
     title += f"{bench.workers()} x {bench.get_title_engine_name()} for {bench.duration()} seconds"
     title += f"\n{bench.get_system_title()}"

@@ -375,7 +375,7 @@ def yerr_graph(
         title,
         "Time [seconds]",
         unit,
-        output_dir.joinpath(f"{trace.get_name()}/{bench.get_bench_name()}/{str(component_type)}"),
+        output_dir.joinpath(f"{trace.get_name()}/{bench.get_bench_name()}/{component_type!s}"),
         f"{prefix}{component.get_name()}",
         show_source_file=trace,
     )
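The title changes here (and the matching one in graph/scaling.py below) merge adjacent f-string literals into a single literal; implicitly concatenated literals on one line are what ruff's ISC001 (flake8-implicit-str-concat) flags. A sketch with hypothetical placeholder values:

item_title, run_title = "CPU", "my run"  # hypothetical placeholders

# Before: two adjacent literals silently glued together at parse time
before = f"{item_title} benchmark\n" f"{run_title}\n" f"\n Stressor: "

# After: one literal, byte-for-byte the same string
after = f"{item_title} benchmark\n{run_title}\n\n Stressor: "

assert before == after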

graph/individual.py (+4 −7)

@@ -16,7 +16,7 @@ def individual_graph(args, output_dir, job: str, traces_name: list) -> int:
     benches = args.traces[0].get_benches_by_job_per_emp(job)
     # For all subjobs sharing the same engine module parameter
     # i.e int128
-    for emp in benches.keys():
+    for emp in benches:
         aggregated_perfs = {}  # type: dict[str, dict[str, Any]]
         aggregated_perfs_watt = {}  # type: dict[str, dict[str, Any]]
         aggregated_watt = {}  # type: dict[str, dict[str, Any]]

@@ -27,7 +27,7 @@ def individual_graph(args, output_dir, job: str, traces_name: list) -> int:
         perf_list, unit = benches[emp]["metrics"]
         # For each metric we need to plot
         for perf in perf_list:
-            if perf not in aggregated_perfs.keys():
+            if perf not in aggregated_perfs:
                 aggregated_perfs[perf] = {}
                 aggregated_perfs_watt[perf] = {}
                 aggregated_watt[perf] = {}

@@ -52,7 +52,7 @@ def individual_graph(args, output_dir, job: str, traces_name: list) -> int:
             for trace in args.traces:
                 # Let's iterate on each Bench from this trace file matching this em
                 for bench in trace.get_benches_by_job_per_emp(job)[emp]["bench"]:
-                    if bench.workers() not in aggregated_perfs[perf].keys():
+                    if bench.workers() not in aggregated_perfs[perf]:
                         # If the worker count is not known yet, let's init all structures with as much zeros as the number of traces
                         # This will be the default value in case of the host doesn't have performance results
                         aggregated_perfs[perf][bench.workers()] = [0] * len(traces_name)

@@ -90,10 +90,7 @@ def individual_graph(args, output_dir, job: str, traces_name: list) -> int:

             # Let's define the tree architecture based on the benchmark profile
             # If the benchmark has multiple performance results, let's put them in a specific directory
-            if len(perf_list) > 1:
-                outdir = outdir.joinpath(emp, perf)
-            else:
-                outdir = outdir.joinpath(emp)
+            outdir = outdir.joinpath(emp, perf) if len(perf_list) > 1 else outdir.joinpath(emp)

             # Select the proper datasource and titles/labels regarding the graph type
             if graph_type == "perf_watt":
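Folding the four-line if/else assignment into a conditional expression corresponds to ruff's SIM108 (if-else-block-instead-of-if-exp). A sketch with hypothetical values:

import pathlib

outdir = pathlib.Path("results")  # hypothetical base directory
emp, perf = "int128", "bogo_ops"  # hypothetical engine module parameter and metric
perf_list = ["bogo_ops", "effective_ops"]

# One expression replaces an if/else that assigned the same name in both branches
outdir = outdir.joinpath(emp, perf) if len(perf_list) > 1 else outdir.joinpath(emp)
print(outdir)  # results/int128/bogo_ops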

graph/scaling.py (+4 −4)

@@ -20,7 +20,7 @@ def scaling_graph(args, output_dir, job: str, traces_name: list) -> int:
     print(f"Scaling: working on job '{job}' : {len(benches.keys())} engine_module_parameter to render")
     # For all subjobs sharing the same engine module parameter
     # i.e int128
-    for emp in benches.keys():
+    for emp in benches:
         aggregated_perfs = {}  # type: dict[str, dict[str, Any]]
         aggregated_perfs_watt = {}  # type: dict[str, dict[str, Any]]
         aggregated_watt = {}  # type: dict[str, dict[str, Any]]

@@ -38,7 +38,7 @@ def scaling_graph(args, output_dir, job: str, traces_name: list) -> int:

         # For each metric we need to plot
         for perf in perf_list:
-            if perf not in aggregated_perfs.keys():
+            if perf not in aggregated_perfs:
                 aggregated_perfs[perf] = {}
                 aggregated_perfs_watt[perf] = {}
                 aggregated_watt[perf] = {}

@@ -62,7 +62,7 @@ def scaling_graph(args, output_dir, job: str, traces_name: list) -> int:

                 # for each performance metric we have to plot,
                 # let's prepare the data set to plot
-                if trace.get_name() not in aggregated_perfs[perf].keys():
+                if trace.get_name() not in aggregated_perfs[perf]:
                     aggregated_perfs[perf][trace.get_name()] = []
                     aggregated_perfs_watt[perf][trace.get_name()] = []
                     aggregated_watt[perf][trace.get_name()] = []

@@ -112,7 +112,7 @@ def scaling_graph(args, output_dir, job: str, traces_name: list) -> int:
                 outfile = f"scaling_{clean_perf}_{bench.get_title_engine_name().replace(' ','_')}"
                 y_source = aggregated_perfs

-            title = f'{args.title}\n\n{graph_type_title} via "{job}" benchmark job\n' f"\n Stressor: "
+            title = f'{args.title}\n\n{graph_type_title} via "{job}" benchmark job\n\n Stressor: '
             title += f"{bench.get_title_engine_name()} for {bench.duration()} seconds"
             xlabel = "Workers"
             # If we have a constent ratio between cores & workers, let's report them under the Xaxis

graph/trace.py (+18 −17)

@@ -1,7 +1,7 @@
 import json
 import pathlib
 from statistics import mean
-from typing import Any  # noqa: F401
+from typing import Any

 from graph.common import fatal
 from hwbench.bench.monitoring_structs import (

@@ -19,6 +19,13 @@
 MEAN = "mean"


+METRIC_AXIs = {
+    "Percent": (100, 10, 5),
+    "RPM": (21000, 1000, 250),
+    "Celsius": (110, 10, 5),
+}
+
+
 class Bench:
     def __init__(self, trace, bench_name: str):
         self.trace = trace

@@ -68,12 +75,12 @@ def load_monitoring(self):
         self.metrics = {}
         m = self.get_monitoring()
         if m:
-            for metric in m.keys():
+            for metric in m:
                 if metric in MonitoringMetadata.list_str():
                     self.metrics[metric] = m[metric]
                 elif metric in Metrics.list_str():
                     self.metrics[metric] = {}
-                    for component_family in m[metric].keys():
+                    for component_family in m[metric]:
                         self.metrics[metric][component_family] = {}
                         for measure in m[metric][component_family]:
                             original_measure = m[metric][component_family][measure]

@@ -100,14 +107,7 @@ def get_monitoring_metric_by_name(self, metric: Metrics, metric_name: str) -> Mo

     def get_monitoring_metric_axis(self, unit: str) -> tuple[Any, Any, Any]:
         """Return adjusted metric axis values"""
-        # return y_max, y_major_tick, y_minor_tick
-        if unit == "Percent":
-            return 100, 10, 5
-        elif unit == "RPM":
-            return 21000, 1000, 250
-        elif unit == "Celsius":
-            return 110, 10, 5
-        return None, None, None
+        return METRIC_AXIs.get(unit, (None, None, None))

     def get_component(self, metric_type: Metrics, component: Any) -> dict[str, MonitorMetric]:
         return self.get_monitoring_metric(metric_type)[str(component)]

@@ -153,8 +153,8 @@ def get_system_title(self):
         d = self.get_trace().get_dmi()
         c = self.get_trace().get_cpu()
         k = self.get_trace().get_kernel()
-        title = f"System: {d['serial']} {d['product']} Bios " f"v{d['bios']['version']} Linux Kernel {k['release']}"
-        title += f"\nProcessor: {c['model']} with {c['physical_cores']} cores " f"and {c['numa_domains']} NUMA domains"
+        title = f"System: {d['serial']} {d['product']} Bios v{d['bios']['version']} Linux Kernel {k['release']}"
+        title += f"\nProcessor: {c['model']} with {c['physical_cores']} cores and {c['numa_domains']} NUMA domains"
         return title

     def job_name(self) -> str:

@@ -449,10 +449,11 @@ def validate(self) -> None:
     def _list_power_metrics(self) -> list[str]:
         first_bench = self.first_bench()
         first_bench.load_monitoring()
-        power_metrics = []
-        for name, value in first_bench.get_monitoring_metric(Metrics.POWER_CONSUMPTION).items():
-            for v in value:
-                power_metrics.append(f"{name}.{v}")
+        power_metrics = [
+            f"{name}.{v}"
+            for name, value in first_bench.get_monitoring_metric(Metrics.POWER_CONSUMPTION).items()
+            for v in value
+        ]
         return power_metrics

     def list_power_metrics(self):
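Two rules plausibly drive this file: the if/elif chain returning constant tuples becomes a module-level dict lookup (the METRIC_AXIs table; ruff's SIM116, if-else-block-instead-of-dict-lookup, flags the original shape), and the nested append() loop becomes a comprehension (PERF401, manual-list-comprehension). A condensed sketch of both, with hypothetical data:

METRIC_AXIs = {  # values are (y_max, y_major_tick, y_minor_tick), as in the commit
    "Percent": (100, 10, 5),
    "RPM": (21000, 1000, 250),
    "Celsius": (110, 10, 5),
}

def get_monitoring_metric_axis(unit: str):
    # A single lookup with a default replaces the if/elif/elif chain
    return METRIC_AXIs.get(unit, (None, None, None))

metrics = {"psu1": ["power"], "psu2": ["power"]}  # hypothetical monitoring data
power_metrics = [f"{name}.{v}" for name, value in metrics.items() for v in value]

assert get_monitoring_metric_axis("RPM") == (21000, 1000, 250)
assert power_metrics == ["psu1.power", "psu2.power"]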

hwbench/bench/benchmark.py (+5 −4)

@@ -1,8 +1,9 @@
 import time
 from typing import Any

-from ..utils import helpers as h
-from ..utils.external import External
+from hwbench.utils import helpers as h
+from hwbench.utils.external import External
+
 from .engine import EngineModuleBase
 from .parameters import BenchmarkParameters

@@ -50,7 +51,7 @@ def validate_parameters(self):
         p = self.get_parameters()
         error = e.validate_module_parameters(p)
         if error:
-            h.fatal(f"Unsupported parameter for {e.get_engine().get_name()}/" f"{e.get_name()}: {error}")
+            h.fatal(f"Unsupported parameter for {e.get_engine().get_name()}/{e.get_name()}: {error}")

     def run(self):
         e = self.get_enginemodule()

@@ -93,7 +94,7 @@ def fully_skipped_job(self) -> bool:
         if not self.skip:
             return False

-        if self.parameters.get_skip_method() == "wait":
+        if self.parameters.get_skip_method() == "wait":  # noqa: SIM103
             # The job is skipped but we were asked to make a no-op run
             return False
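Two things happen in this file and the sibling hwbench/bench modules. First, the parent-relative imports (..utils) become absolute (hwbench.utils), matching ruff's flake8-tidy-imports TID252 (relative-imports) configured to ban parent imports; sibling imports like .engine stay relative. Second, the added noqa suppresses flake8-simplify's SIM103 (needless-bool), which would otherwise fold the early returns into a single boolean expression and lose the explanatory comment. A hypothetical standalone reconstruction of the guarded logic (the real method lives on the Benchmark class, and the trailing return True is assumed):

def fully_skipped_job(skip: bool, skip_method: str) -> bool:
    # hypothetical free-function version of Benchmark.fully_skipped_job
    if not skip:
        return False

    if skip_method == "wait":  # noqa: SIM103
        # The job is skipped but we were asked to make a no-op run
        return False

    return True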

hwbench/bench/benchmarks.py (+3 −2)

@@ -4,8 +4,9 @@
 import time
 from datetime import timedelta

-from ..environment.hardware import BaseHardware
-from ..utils import helpers as h
+from hwbench.environment.hardware import BaseHardware
+from hwbench.utils import helpers as h
+
 from .benchmark import Benchmark
 from .monitoring import Monitoring
 from .parameters import BenchmarkParameters

hwbench/bench/engine.py (+5 −4)

@@ -3,8 +3,9 @@
 import abc
 import pathlib

-from ..utils.external import External
-from ..utils.helpers import fatal
+from hwbench.utils.external import External
+from hwbench.utils.helpers import fatal
+
 from .parameters import BenchmarkParameters


@@ -51,7 +52,7 @@ def __init__(self, name: str, binary: str, modules: dict[str, EngineModuleBase]
         self.modules = modules
         # FIXME: If the import is done at the file level, the mocking is lost here
         # So I'm importing is_binary_available just before the call :/
-        from ..utils.helpers import is_binary_available
+        from hwbench.utils.helpers import is_binary_available

         if not is_binary_available(self.binary):
             fatal(f"Engine {name} requires '{binary}' binary, please install it.")

@@ -76,4 +77,4 @@ def get_module(self, module_name: str) -> EngineModuleBase | None:
         return self.modules.get(module_name)

     def module_exists(self, module_name) -> bool:
-        return module_name in self.modules.keys()
+        return module_name in self.modules

hwbench/bench/monitoring.py (+6 −5)

@@ -2,9 +2,10 @@
 from threading import Thread
 from typing import Any

-from ..environment.hardware import BaseHardware
-from ..environment.turbostat import Turbostat
-from ..utils import helpers as h
+from hwbench.environment.hardware import BaseHardware
+from hwbench.environment.turbostat import Turbostat
+from hwbench.utils import helpers as h
+
 from .monitoring_structs import Metrics, MonitoringMetadata, MonitorMetric


@@ -63,10 +64,10 @@ def prepare(self):
         def check_monitoring(source: str, metric: Metrics):
             data = self.get_metric(metric)
             if not len(data):
-                h.fatal(f"Cannot detect {str(metric)} metrics")
+                h.fatal(f"Cannot detect {metric!s} metrics")

             print(
-                f"Monitoring/{source}: {str(metric)} metrics:"
+                f"Monitoring/{source}: {metric!s} metrics:"
                 + ", ".join([f"{len(data[pc])}x{pc}" for pc in data if len(data[pc]) > 0])
             )

hwbench/bench/parameters.py (+2 −1)

@@ -1,6 +1,7 @@
 import pathlib

-from ..environment.hardware import BaseHardware
+from hwbench.environment.hardware import BaseHardware
+
 from .monitoring import Monitoring


hwbench/bench/test_benchmarks.py (+5 −6)

@@ -1,6 +1,8 @@
 import pathlib
 from unittest.mock import patch

+import pytest
+
 from . import test_benchmarks_common as tbc
 from .monitoring_structs import Metrics

@@ -45,16 +47,13 @@ def test_parsing(self):
         for job in range(196, 199):
             self.assert_job(job, "check_physical_core_int8_perf", "cpu", "int8")
             # Ensure the auto syntax updated the number of engine instances
-            if job == 198:
-                instances = 4
-            else:
-                instances = 2
+            instances = 4 if job == 198 else 2
             assert self.get_bench_parameters(job).get_engine_instances_count() == instances

         group_count = 0
         for job in range(199, 203):
             group_count += 2
-            self.assert_job(job, "check_physical_core_scale_plus_1_int8_perf", "cpu", "int8")  # noqa: E501
+            self.assert_job(job, "check_physical_core_scale_plus_1_int8_perf", "cpu", "int8")
             assert self.get_bench_parameters(job).get_engine_instances_count() == group_count
             assert len(self.get_bench_parameters(job).get_pinned_cpu()) == group_count

@@ -89,7 +88,7 @@ def test_stream_short(self):
         self.load_benches("./hwbench/config/stream.ini")
         assert self.get_jobs_config().get_config().getint("global", "runtime") == 5
         self.get_jobs_config().get_config().set("global", "runtime", "2")
-        with self.assertRaises(SystemExit):
+        with pytest.raises(SystemExit):
             self.parse_jobs_config()
         # This jobs_config file doesn't need monitoring
         assert self.benches.need_monitoring() is False
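Swapping self.assertRaises for pytest.raises matches ruff's flake8-pytest-style PT027 (pytest-unittest-raises-assertion); the pytest context manager is a drop-in replacement even inside unittest.TestCase subclasses. A minimal runnable sketch:

import unittest

import pytest

class TestExit(unittest.TestCase):
    def test_exit(self):
        # Drop-in replacement for: with self.assertRaises(SystemExit):
        with pytest.raises(SystemExit):
            raise SystemExit(1)

if __name__ == "__main__":
    unittest.main()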
