1 parent 01b5408 commit f356a92
benchmarks/torchbench_model.py
@@ -191,6 +191,7 @@ def add_torchbench_dir(self):
     else:
       raise Exception("Torch Benchmark folder not found.")
 
+    print("this is the torchbench folder.")
     return torchbench_dir
 
   def list_model_configs(self):
@@ -373,6 +374,9 @@ def is_accelerator_tpu(self):
     return self.benchmark_experiment.accelerator == "tpu"
 
   def use_amp(self):
+    # AMP is only supported on cuda and tpu, not on cpu.
+    if self.benchmark_experiment.accelerator == "cpu":
+      return False
     return self.is_training() or self.model_name in config(
     ).dtype.force_amp_for_fp16_bf16_models
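
For context, here is a minimal sketch (not part of this commit) of how a use_amp() result like the one above is typically consumed: the runner wraps the forward pass in torch.autocast only when AMP is enabled. The forward_pass helper and its model, inputs, and accelerator arguments are hypothetical stand-ins, not names from this repository.

# A minimal sketch (not from this commit) of gating autocast on the
# accelerator, mirroring the early `return False` for cpu above.
import contextlib

import torch


def forward_pass(model, inputs, accelerator: str, use_amp: bool):
  # Only autocast when AMP is enabled and we are not on cpu; the cuda
  # case uses the stock torch.autocast context manager.
  if use_amp and accelerator == "cuda":
    ctx = torch.autocast(device_type="cuda")
  else:
    ctx = contextlib.nullcontext()
  with ctx:
    return model(*inputs)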