[Benchmarking-Py] - Release 2.0.1 - Change to perf_counter()
DEKHTIARJonathan committed Sep 30, 2022
1 parent: a40ca6a | commit: 62c24e2
Showing 7 changed files with 20 additions and 15 deletions.
tftrt/benchmarking-python/CHANGELOG.md (5 additions & 0 deletions)
@@ -46,6 +46,11 @@ Description of the change
 
 <!-- YOU CAN EDIT FROM HERE -->
 
+## [2.0.1] - 2022.09.30 - @DEKHTIARJonathan
+
+- Change from `time.time()` to `time.perf_counter()` for better time
+  measurement.
+
 ## [2.0.0] - 2022.08.04 - @DEKHTIARJonathan
 
 - Fix for XLA FP16 actually not being applied due to `"min_graph_nodes": -1`
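Note on the change itself: `time.time()` reads the system wall clock, which can jump when the clock is adjusted (e.g. by NTP), whereas `time.perf_counter()` is guaranteed monotonic and uses the highest-resolution timer available, making it the better choice for measuring short intervals. A minimal sketch of the pattern (the workload below is purely illustrative):

```python
import time

# perf_counter() is monotonic and high-resolution: safe for intervals.
# time.time() tracks the wall clock and may be adjusted mid-measurement.
start = time.perf_counter()
total = sum(i * i for i in range(1_000_000))  # stand-in workload
elapsed = time.perf_counter() - start
print(f"Elapsed: {elapsed * 1000:.1f} ms (checksum: {total})")
```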
tftrt/benchmarking-python/benchmark_autotuner.py (2 additions & 2 deletions)
@@ -32,9 +32,9 @@ def __init__(self, funcs, calls_per_func, skip_n_first):
     def _autotune(self, *arg, **kwargs):
         fn_id = self._call_counter // self._calls_per_func
         try:
-            start_t = time.time()
+            start_t = time.perf_counter()
             output = self._fns[fn_id](*arg, **kwargs)
-            self._timings[fn_id].append(time.time() - start_t)
+            self._timings[fn_id].append(time.perf_counter() - start_t)
         except IndexError:
             print()  # visual spacing
             logging.debug("AutoTuning is over... Collecting timing statistics:")
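For orientation: `_autotune` is part of a dispatcher that times each candidate function over a fixed number of calls and keeps the fastest one. A simplified standalone sketch of that idea (`pick_fastest` and its signature are illustrative, not the project's actual API):

```python
import time

def pick_fastest(funcs, *args, calls_per_func=10, **kwargs):
    """Return the candidate callable with the lowest mean latency."""
    mean_latencies = []
    for fn in funcs:
        samples = []
        for _ in range(calls_per_func):
            start_t = time.perf_counter()
            fn(*args, **kwargs)
            samples.append(time.perf_counter() - start_t)
        mean_latencies.append(sum(samples) / len(samples))
    return funcs[mean_latencies.index(min(mean_latencies))]

# Toy usage: both candidates accept the same input; the faster one wins.
best = pick_fastest([min, sorted], list(range(1_000)))
print(best.__name__)
```

The project's autotuner also takes a `skip_n_first` argument (visible in the hunk header above), presumably to discard the first calls as warmup.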
tftrt/benchmarking-python/benchmark_info.py (1 addition & 1 deletion)
@@ -10,7 +10,7 @@
 # The `__version__` number shall be updated everytime core benchmarking files
 # are updated.
 # Please update CHANGELOG.md with a description of what this version changed.
-__version__ = "2.0.0"
+__version__ = "2.0.1"
 
 
 def get_commit_id():
tftrt/benchmarking-python/benchmark_runner.py (6 additions & 6 deletions)
@@ -589,9 +589,9 @@ def start_profiling():
 
             with tracing_ctx('Input Dequeueing'):
                 try:
-                    start_time = time.time()
+                    start_time = time.perf_counter()
                     data_batch = dequeue_batch_fn()
-                    dequeue_times.append(time.time() - start_time)
+                    dequeue_times.append(time.perf_counter() - start_time)
                 except (StopIteration, OutOfRangeError):
                     logging.info("[Exiting] Reached end of dataset ...")
                     break
@@ -600,14 +600,14 @@
                 x, y = self.preprocess_model_inputs(data_batch)
 
             with tracing_ctx('Inputs MemcpyHtoD'):
-                start_time = time.time()
+                start_time = time.perf_counter()
                 x = force_data_on_gpu_fn(x)
-                memcopy_times.append(time.time() - start_time)
+                memcopy_times.append(time.perf_counter() - start_time)
 
             with tracing_ctx('GPU Inference'):
-                start_time = time.time()
+                start_time = time.perf_counter()
                 y_pred = infer_batch(x)
-                iter_times.append(time.time() - start_time)
+                iter_times.append(time.perf_counter() - start_time)
 
             if not self._args.debug_performance:
                 log_step(
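These hunks show the runner timing each phase of the loop separately (input dequeue, host-to-device copy, GPU inference), so that input-pipeline stalls are not misattributed to inference. A minimal sketch of that accounting pattern, with stand-in phases (the `timed` helper and all workloads are illustrative):

```python
import time
from statistics import mean

dequeue_times, memcopy_times, iter_times = [], [], []

def timed(fn, bucket, *args):
    # Run one phase and append its duration to the matching bucket.
    start = time.perf_counter()
    out = fn(*args)
    bucket.append(time.perf_counter() - start)
    return out

for _ in range(100):
    data = timed(lambda: list(range(1_000)), dequeue_times)          # stand-in: dequeue
    data = timed(lambda d: [x * 2 for x in d], memcopy_times, data)  # stand-in: HtoD copy
    _ = timed(sum, iter_times, data)                                 # stand-in: inference

for name, bucket in (("dequeue", dequeue_times),
                     ("memcpy", memcopy_times),
                     ("infer", iter_times)):
    print(f"{name}: mean {mean(bucket) * 1e6:.1f} us over {len(bucket)} batches")
```

Note that a host-side timer only measures what the host observes; if the framework dispatches work asynchronously, the timed call must block until the result is materialized for the numbers to be meaningful.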
tftrt/benchmarking-python/benchmark_utils.py (2 additions & 2 deletions)
@@ -68,9 +68,9 @@ def timed_section(msg, activate=True, start_end_mode=True):
     if start_end_mode:
         logging.info(f"[START] {msg} ...")
 
-    start_time = time.time()
+    start_time = time.perf_counter()
     yield
-    total_time = time.time() - start_time
+    total_time = time.perf_counter() - start_time
 
     if start_end_mode:
         logging.info(f"[END] {msg} - Duration: {total_time:.1f}s")
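`timed_section` is a generator-based context manager: the code before `yield` runs on entry, the caller's `with` block runs at the `yield`, and the code after it runs on exit. A stripped-down sketch of the same pattern (omitting the project's `activate` and `start_end_mode` options):

```python
import logging
import time
from contextlib import contextmanager

logging.basicConfig(level=logging.INFO)

@contextmanager
def timed_section(msg):
    # Entry: log and start the clock.
    logging.info(f"[START] {msg} ...")
    start_time = time.perf_counter()
    yield  # The caller's `with` block executes here.
    # Exit: stop the clock and report.
    total_time = time.perf_counter() - start_time
    logging.info(f"[END] {msg} - Duration: {total_time:.1f}s")

# Usage: the whole block is timed as one section.
with timed_section("Warmup"):
    time.sleep(0.1)  # placeholder for real work
```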
tftrt/benchmarking-python/template/infer.py (2 additions & 2 deletions)
@@ -137,9 +137,9 @@ def evaluate_model(self, predictions, expected, bypass_data_to_eval):
     # let's say transforming a list into a dict() or reverse
     batch = preprocess_model_inputs(batch)
 
-    start_t = time.time()
+    start_t = time.perf_counter()
     outputs = model_fn(batch)
-    print(f"Inference Time: {(time.time() - start_t)*1000:.1f}ms")  # 0.001
+    print(f"Inference Time: {(time.perf_counter() - start_t)*1000:.1f}ms")  # 0.001
 
     ## post my outputs to "measure accuracy"
     ## note: we skip that
@@ -132,10 +132,10 @@ def calibration_input_fn():
     for step in range(1, INFERENCE_STEPS + 1):
         if step % 100 == 0:
             print("Processing step: %04d ..." % step)
-        start_t = time.time()
+        start_t = time.perf_counter()
         probs = infer(features)[output_tensorname]
         inferred_class = tf.math.argmax(probs).numpy()
-        step_time = time.time() - start_t
+        step_time = time.perf_counter() - start_t
         if step >= WARMUP_STEPS:
             step_times.append(step_time)
 except tf.errors.OutOfRangeError:
