
Commit 1cd472f

what's happening.
1 parent 7367bb1 commit 1cd472f

2 files changed: +7, -16 lines changed

benchmarks/benchmarking_utils.py

Lines changed: 5 additions & 9 deletions
@@ -9,13 +9,9 @@
 import torch.utils.benchmark as benchmark
 
 from diffusers.models.modeling_utils import ModelMixin
-from diffusers.utils import logging
 from diffusers.utils.testing_utils import require_torch_gpu, torch_device
 
 
-logger = logging.get_logger(__name__)
-
-
 def benchmark_fn(f, *args, **kwargs):
     t0 = benchmark.Timer(
         stmt="f(*args, **kwargs)",
@@ -105,7 +101,7 @@ def post_benchmark(self, model):
     @torch.no_grad()
     def run_benchmark(self, scenario: BenchmarkScenario):
         # 0) Basic stats
-        logger.info(f"Running scenario: {scenario.name}.")
+        print(f"Running scenario: {scenario.name}.")
         model = model_init_fn(scenario.model_cls, **scenario.model_init_kwargs)
         num_params = round(calculate_params(model) / 1e6, 2)
         flops = round(calculate_flops(model, input_dict=scenario.get_model_input_dict()) / 1e6, 2)
@@ -125,7 +121,7 @@ def run_benchmark(self, scenario: BenchmarkScenario):
                 compile_kwargs=None,
             )
         except Exception as e:
-            logger.error(f"Benchmark could not be run with the following error\n: {e}")
+            print(f"Benchmark could not be run with the following error\n: {e}")
             return results
 
         # 2) compiled stats (if any)
@@ -140,7 +136,7 @@ def run_benchmark(self, scenario: BenchmarkScenario):
                 compile_kwargs=scenario.compile_kwargs,
             )
         except Exception as e:
-            logger.error(f"Compilation benchmark could not be run with the following error\n: {e}")
+            print(f"Compilation benchmark could not be run with the following error\n: {e}")
         if plain is None:
             return results
 
@@ -170,10 +166,10 @@ def run_bencmarks_and_collate(self, scenarios: Union[BenchmarkScenario, list[Ben
             try:
                 records.append(self.run_benchmark(s))
             except Exception as e:
-                logger.error(f"Running scenario ({s.name}) led to error:\n{e}")
+                print(f"Running scenario ({s.name}) led to error:\n{e}")
         df = pd.DataFrame.from_records([r for r in records if r])
         df.to_csv(filename, index=False)
-        logger.info(f"Results serialized to {filename=}.")
+        print(f"Results serialized to {filename=}.")
 
     def _run_phase(
         self,
benchmarks/run_all.py

Lines changed: 2 additions & 7 deletions
@@ -4,17 +4,12 @@
 
 import pandas as pd
 
-from diffusers.utils import logging
-
 
 PATTERN = "benchmarking_*.py"
 FINAL_CSV_FILENAME = "collated_results.csv"
 GITHUB_SHA = os.getenv("GITHUB_SHA", None)
 
 
-logger = logging.get_logger(__name__)
-
-
 class SubprocessCallException(Exception):
     pass
 

@@ -42,12 +37,12 @@ def run_scripts():
 
     for file in python_files:
         if file != "benchmarking_utils.py":
-            logger.info(f"****** Running file: {file} ******")
+            print(f"****** Running file: {file} ******")
             command = f"python {file}"
             try:
                 run_command(command.split())
             except SubprocessCallException as e:
-                logger.error(f"Error running {file}: {e}")
+                print(f"Error running {file}: {e}")
                 continue
 
 
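run_command itself sits outside this diff's hunks. A minimal sketch of what a subprocess wrapper that raises SubprocessCallException usually looks like (the signature and message format here are assumptions):

    import subprocess

    class SubprocessCallException(Exception):
        pass  # defined in run_all.py, as the diff above shows

    def run_command(command: list, return_stdout: bool = False):
        # Hypothetical helper: fold stderr into stdout so a failure surfaces
        # the full output, then re-raise as SubprocessCallException.
        try:
            output = subprocess.check_output(command, stderr=subprocess.STDOUT)
            if return_stdout:
                return output.decode("utf-8")
        except subprocess.CalledProcessError as e:
            raise SubprocessCallException(
                f"Command `{' '.join(command)}` failed with:\n{e.output.decode()}"
            ) from e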