 from rich.table import Table
 from rich.text import Text

+from pytest_codspeed import __semver_version__
 from pytest_codspeed.instruments import Instrument
+from pytest_codspeed.instruments.hooks import InstrumentHooks

 if TYPE_CHECKING:
     from typing import Any, Callable
@@ -131,17 +133,26 @@ class Benchmark:


 def run_benchmark(
-    name: str, uri: str, fn: Callable[P, T], args, kwargs, config: BenchmarkConfig
+    instrument_hooks: InstrumentHooks | None,
+    name: str,
+    uri: str,
+    fn: Callable[P, T],
+    args,
+    kwargs,
+    config: BenchmarkConfig,
 ) -> tuple[Benchmark, T]:
+    def __codspeed_root_frame__() -> T:
+        return fn(*args, **kwargs)
+
     # Compute the actual result of the function
-    out = fn(*args, **kwargs)
+    out = __codspeed_root_frame__()

     # Warmup
     times_per_round_ns: list[float] = []
     warmup_start = start = perf_counter_ns()
     while True:
         start = perf_counter_ns()
-        fn(*args, **kwargs)
+        __codspeed_root_frame__()
         end = perf_counter_ns()
         times_per_round_ns.append(end - start)
         if end - warmup_start > config.warmup_time_ns:
@@ -166,16 +177,21 @@ def run_benchmark(
     # Benchmark
     iter_range = range(iter_per_round)
     run_start = perf_counter_ns()
+    if instrument_hooks:
+        instrument_hooks.start_benchmark()
     for _ in range(rounds):
         start = perf_counter_ns()
         for _ in iter_range:
-            fn(*args, **kwargs)
+            __codspeed_root_frame__()
         end = perf_counter_ns()
         times_per_round_ns.append(end - start)

         if end - run_start > config.max_time_ns:
             # TODO: log something
             break
+    if instrument_hooks:
+        instrument_hooks.stop_benchmark()
+        instrument_hooks.set_executed_benchmark(uri)
     benchmark_end = perf_counter_ns()
     total_time = (benchmark_end - run_start) / 1e9

@@ -192,8 +208,15 @@ def run_benchmark(

 class WallTimeInstrument(Instrument):
     instrument = "walltime"
+    instrument_hooks: InstrumentHooks | None

     def __init__(self, config: CodSpeedConfig) -> None:
+        try:
+            self.instrument_hooks = InstrumentHooks()
+            self.instrument_hooks.set_integration("pytest-codspeed", __semver_version__)
+        except RuntimeError:
+            self.instrument_hooks = None
+
         self.config = config
         self.benchmarks: list[Benchmark] = []

@@ -209,6 +232,7 @@ def measure(
         **kwargs: P.kwargs,
     ) -> T:
         bench, out = run_benchmark(
+            instrument_hooks=self.instrument_hooks,
            name=name,
            uri=uri,
            fn=fn,
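Reviewer note: the diff threads an optional InstrumentHooks handle into run_benchmark, wraps the user callable in a __codspeed_root_frame__ frame so the measured call is recognizable, and brackets only the timed rounds with start_benchmark()/stop_benchmark() before reporting the executed URI. Below is a minimal sketch of that pattern for anyone reading the diff without the full file; the _NoopHooks class and run_rounds helper are hypothetical stand-ins, not part of pytest-codspeed.

from __future__ import annotations

from time import perf_counter_ns
from typing import Callable, TypeVar

T = TypeVar("T")


class _NoopHooks:
    # Hypothetical stand-in for InstrumentHooks, only to keep the sketch runnable.
    def start_benchmark(self) -> None: ...
    def stop_benchmark(self) -> None: ...
    def set_executed_benchmark(self, uri: str) -> None: ...


def run_rounds(
    hooks: _NoopHooks | None, uri: str, fn: Callable[[], T], rounds: int
) -> list[int]:
    # Wrap the user callable so the measured frame has a stable, recognizable name.
    def __codspeed_root_frame__() -> T:
        return fn()

    times_ns: list[int] = []
    # Bracket only the measured section; hooks are None when the native library
    # could not be loaded, in which case timing still proceeds normally.
    if hooks:
        hooks.start_benchmark()
    for _ in range(rounds):
        start = perf_counter_ns()
        __codspeed_root_frame__()
        times_ns.append(perf_counter_ns() - start)
    if hooks:
        hooks.stop_benchmark()
        hooks.set_executed_benchmark(uri)
    return times_ns


print(run_rounds(_NoopHooks(), "example::bench", lambda: sum(range(1_000)), rounds=3))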