forked from ultralytics/yolov5
Commit: Improved detect.py timing (ultralytics#4741)

* Improved detect.py timing
* Eliminate 1 time_sync() call
* Inference-only time
* dash
* #Save section
* Cleanup
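The diff below brackets each timed stage between time_sync() calls. As an illustration only (not part of this commit), here is a minimal sketch of such a helper, assuming it behaves like yolov5's utils.torch_utils.time_sync(): a wall-clock read that first waits for any pending CUDA work so GPU time is counted.

import time

import torch


def time_sync():
    # Wait for queued CUDA kernels so the reading reflects finished GPU work,
    # then return the current wall-clock time in seconds.
    if torch.cuda.is_available():
        torch.cuda.synchronize()
    return time.time()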
1 parent c5360f6 · commit 7af1b4c
Showing 2 changed files with 22 additions and 18 deletions.
@@ -154,22 +154,22 @@ def run(data,
     names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)}
     class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
     s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
-    p, r, f1, mp, mr, map50, map, t0, t1, t2 = 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.
+    dt, p, r, f1, mp, mr, map50, map = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
     loss = torch.zeros(3, device=device)
     jdict, stats, ap, ap_class = [], [], [], []
     for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
-        t_ = time_sync()
+        t1 = time_sync()
         img = img.to(device, non_blocking=True)
         img = img.half() if half else img.float()  # uint8 to fp16/32
         img /= 255.0  # 0 - 255 to 0.0 - 1.0
         targets = targets.to(device)
         nb, _, height, width = img.shape  # batch size, channels, height, width
-        t = time_sync()
-        t0 += t - t_
+        t2 = time_sync()
+        dt[0] += t2 - t1
 
         # Run model
         out, train_out = model(img, augment=augment)  # inference and training outputs
-        t1 += time_sync() - t
+        dt[1] += time_sync() - t2
 
         # Compute loss
         if compute_loss:
@@ -178,9 +178,9 @@ def run(data,
         # Run NMS
         targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device)  # to pixels
         lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else []  # for autolabelling
-        t = time_sync()
+        t3 = time_sync()
         out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls)
-        t2 += time_sync() - t
+        dt[2] += time_sync() - t3
 
         # Statistics per image
         for si, pred in enumerate(out):
@@ -247,7 +247,7 @@ def run(data,
             print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))
 
     # Print speeds
-    t = tuple(x / seen * 1E3 for x in (t0, t1, t2))  # speeds per image
+    t = tuple(x / seen * 1E3 for x in dt)  # speeds per image
     if not training:
         shape = (batch_size, 3, imgsz, imgsz)
         print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t)
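For context, a minimal standalone sketch (not from the commit) of the dt-based timing pattern the new code uses: three accumulators for pre-process, inference, and NMS time, divided by the number of images seen. Stand-in tensors replace the real dataloader, model, and non_max_suppression, and the time_sync() helper is the same assumed sketch as above.

import time

import torch


def time_sync():  # same assumed helper as sketched above
    if torch.cuda.is_available():
        torch.cuda.synchronize()
    return time.time()


dt = [0.0, 0.0, 0.0]  # accumulated pre-process, inference, NMS time in seconds
seen = 0              # number of images timed

for img in (torch.zeros(2, 3, 640, 640) for _ in range(3)):  # stand-in batches
    t1 = time_sync()
    img = img.float() / 255.0      # stand-in pre-processing
    t2 = time_sync()
    dt[0] += t2 - t1               # pre-process time

    out = img.mean(dim=(1, 2, 3))  # stand-in for model(img)
    dt[1] += time_sync() - t2      # inference-only time, no extra time_sync() before the model

    t3 = time_sync()
    out = out.clamp(0, 1)          # stand-in for non_max_suppression(out)
    dt[2] += time_sync() - t3      # NMS time

    seen += img.shape[0]

t = tuple(x / seen * 1E3 for x in dt)  # per-image speeds in milliseconds
print('Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image' % t)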