
Commit

update code
wz authored and wz committed Oct 20, 2020
1 parent f16c84b commit 92720c9
Showing 3 changed files with 17 additions and 8 deletions.
4 changes: 2 additions & 2 deletions pytorch_object_detection/yolov3_spp/train.py
@@ -174,8 +174,8 @@ def train(hyp):
# start training
# caching val_data when you have plenty of memory(RAM)
print("caching val_data for evaluation.")
coco = None
# coco = get_coco_api_from_dataset(val_dataset)
# coco = None
coco = get_coco_api_from_dataset(val_dataset)
print("starting traning for %g epochs..." % epochs)
print('Using %g dataloader workers' % nw)
for epoch in range(start_epoch, epochs):
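The change above enables caching: `coco` now holds the object returned by `get_coco_api_from_dataset(val_dataset)` so the validation annotations are indexed once up front rather than rebuilt at every evaluation. As a rough illustration only (this is not the repository's helper; the target fields "boxes", "labels", "height", "width" are assumptions), such a conversion to an in-memory pycocotools COCO index can look like this:

from pycocotools.coco import COCO


def build_coco_index(dataset):
    # Illustrative sketch: assumes each sample's target dict provides
    # "boxes" (xyxy), "labels", "height", "width".
    coco_ds = {"images": [], "annotations": []}
    categories = set()
    ann_id = 1
    for img_id in range(len(dataset)):
        _, target = dataset[img_id]
        coco_ds["images"].append({"id": img_id,
                                  "height": int(target["height"]),
                                  "width": int(target["width"])})
        for box, label in zip(target["boxes"], target["labels"]):
            x1, y1, x2, y2 = box.tolist()
            coco_ds["annotations"].append({
                "id": ann_id, "image_id": img_id, "category_id": int(label),
                "bbox": [x1, y1, x2 - x1, y2 - y1],        # COCO stores xywh
                "area": (x2 - x1) * (y2 - y1), "iscrowd": 0})
            categories.add(int(label))
            ann_id += 1
    coco_ds["categories"] = [{"id": c} for c in sorted(categories)]
    coco = COCO()                 # empty COCO object, filled in manually
    coco.dataset = coco_ds
    coco.createIndex()
    return coco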
@@ -80,7 +80,7 @@ def train_one_epoch(model, optimizer, data_loader, device, epoch,
print("training image path: {}".format(",".join(paths)))
sys.exit(1)

losses *= 1 / accumulate # scale loss
losses *= 1. / accumulate # scale loss

# backward
scaler.scale(losses).backward()
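The `1. / accumulate` factor averages the accumulated loss over the accumulation window before `scaler.scale(losses).backward()`; writing `1.` makes the float division explicit and guards against integer truncation under older integer-division semantics. A minimal sketch of the surrounding loop, assuming a hypothetical `compute_loss` helper plus existing `model`, `optimizer`, `data_loader`, and `accumulate` objects:

import torch

scaler = torch.cuda.amp.GradScaler()

for i, (imgs, targets) in enumerate(data_loader):
    with torch.cuda.amp.autocast():
        losses = compute_loss(model(imgs), targets)   # hypothetical loss helper
        losses *= 1. / accumulate                     # average over the window

    scaler.scale(losses).backward()                   # gradients keep accumulating

    if (i + 1) % accumulate == 0:                     # step every `accumulate` batches
        scaler.step(optimizer)
        scaler.update()
        optimizer.zero_grad()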
19 changes: 14 additions & 5 deletions pytorch_object_detection/yolov3_spp/utils/utils.py
@@ -435,18 +435,25 @@ def build_targets(p, targets, model):

style = None
multi_gpu = type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
for i, j in enumerate(model.yolo_layers):
for i, j in enumerate(model.yolo_layers): # [89, 101, 113]
# get the anchors corresponding to this yolo predictor
anchors = model.module.module_list[j].anchor_vec if multi_gpu else model.module_list[j].anchor_vec
gain[2:] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain
na = anchors.shape[0] # number of anchors
# [3] -> [3, 1] -> [3, nt]
at = torch.arange(na).view(na, 1).repeat(1, nt) # anchor tensor, same as .repeat_interleave(nt)

# Match targets to anchors
a, t, offsets = [], targets * gain, 0
if nt:
if nt:  # if there are any targets
# r = t[None, :, 4:6] / anchors[:, None] # wh ratio
# j = torch.max(r, 1. / r).max(2)[0] < model.hyp['anchor_t'] # compare

# iou_t = 0.20
# j: [3, nt]
j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n) = wh_iou(anchors(3,2), gwh(n,2))
# t.repeat(na, 1, 1): [nt, 6] -> [3, nt, 6]
# gather the anchor/target pairs whose iou exceeds the threshold
a, t = at[j], t.repeat(na, 1, 1)[j] # filter

# overlaps
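The positive-sample mask `j` comes from `wh_iou`, which scores how well each of the 3 anchor templates fits each target by width and height alone, as if the boxes shared a corner. A small sketch consistent with the shapes annotated above (`wh_iou_sketch` is an illustrative stand-in, not necessarily the repository's exact implementation):

import torch

def wh_iou_sketch(wh1, wh2):
    # wh1: [na, 2] anchor templates, wh2: [nt, 2] target widths/heights (grid units).
    # IoU computed as if every pair of boxes shared the same top-left corner.
    wh1 = wh1[:, None]                               # [na, 1, 2]
    wh2 = wh2[None]                                  # [1, nt, 2]
    inter = torch.min(wh1, wh2).prod(2)              # [na, nt]
    return inter / (wh1.prod(2) + wh2.prod(2) - inter)

# Usage mirroring the hunk above (numbers illustrative):
anchors = torch.tensor([[1.25, 1.625], [2.0, 3.75], [4.125, 2.875]])   # [3, 2]
gwh = torch.rand(7, 2) * 5                                             # [nt=7, 2]
j = wh_iou_sketch(anchors, gwh) > 0.20         # [3, 7] bool mask, iou_t = 0.20
at = torch.arange(3).view(3, 1).repeat(1, 7)   # anchor index per (anchor, target) pair
a = at[j]                                      # anchor index of every positive match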
@@ -466,18 +473,20 @@ def build_targets(p, targets, model):
offsets = torch.cat((z, z[j] + off[0], z[k] + off[1], z[l] + off[2], z[m] + off[3]), 0) * g

# Define
# long is equivalent to to(torch.int64); values are rounded down
b, c = t[:, :2].long().T # image, class
gxy = t[:, 2:4] # grid xy
gwh = t[:, 4:6] # grid wh
gij = (gxy - offsets).long()
gij = (gxy - offsets).long()  # top-left coordinates of the grid cell each matched target falls into
gi, gj = gij.T # grid xy indices

# Append
indices.append((b, a, gj, gi)) # image, anchor, grid indices
tbox.append(torch.cat((gxy - gij, gwh), 1)) # box
indices.append((b, a, gj, gi)) # image, anchor, grid indices(x, y)
tbox.append(torch.cat((gxy - gij, gwh), 1))  # x, y offsets of the gt box relative to its anchor's grid cell, plus w, h
anch.append(anchors[a]) # anchors
tcls.append(c) # class
if c.shape[0]: # if any targets
# target class labels must be smaller than the given number of object classes
assert c.max() < model.nc, 'Model accepts %g classes labeled from 0-%g, however you labelled a class %g. ' \
'See https://github.com/ultralytics/yolov3/wiki/Train-Custom-Data' % (
model.nc, model.nc - 1, c.max())
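Here `.long()` floors the (offset-shifted) center to the index of its grid cell, so the `gxy - gij` stored in `tbox` is the fractional in-cell offset the network regresses, while `gwh` stays in grid units. A toy numeric example (values assumed, single target, no neighbour-cell offsets):

import torch

gxy = torch.tensor([[6.3, 4.8]])          # target center, grid units (e.g. on a 13x13 grid)
gwh = torch.tensor([[3.2, 2.5]])          # target width/height, grid units
offsets = 0

gij = (gxy - offsets).long()              # floor -> cell index: tensor([[6, 4]])
gi, gj = gij.T                            # gi: x index, gj: y index
tbox = torch.cat((gxy - gij, gwh), 1)     # ~[[0.3, 0.8, 3.2, 2.5]]
# gxy - gij is the in-cell offset the network regresses (via a sigmoid),
# while gwh is later predicted relative to the matched anchor's w, h.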
