diff --git a/deploying_service/deploying_pytorch/pytorch_flask_service/requirements.txt b/deploying_service/deploying_pytorch/pytorch_flask_service/requirements.txt
index d3bd30fb0..6bdc78390 100644
--- a/deploying_service/deploying_pytorch/pytorch_flask_service/requirements.txt
+++ b/deploying_service/deploying_pytorch/pytorch_flask_service/requirements.txt
@@ -1,3 +1,3 @@
 Flask==1.1.1
 Flask_Cors==3.0.9
-Pillow==7.2.0
+Pillow==8.1.1
diff --git a/pytorch_classification/tensorboard_test/requirements.txt b/pytorch_classification/tensorboard_test/requirements.txt
index 9010b91fd..8ee07b161 100644
--- a/pytorch_classification/tensorboard_test/requirements.txt
+++ b/pytorch_classification/tensorboard_test/requirements.txt
@@ -2,5 +2,5 @@ torchvision==0.7.0
 tqdm==4.42.1
 matplotlib==3.2.1
 torch==1.6.0
-Pillow==8.0.1
+Pillow==8.1.1
 tensorboard==2.2.2
diff --git a/pytorch_object_detection/faster_rcnn/requirements.txt b/pytorch_object_detection/faster_rcnn/requirements.txt
index f3e8b9859..5743e3518 100644
--- a/pytorch_object_detection/faster_rcnn/requirements.txt
+++ b/pytorch_object_detection/faster_rcnn/requirements.txt
@@ -5,4 +5,4 @@ tqdm==4.42.1
 torch==1.6.0
 torchvision==0.7.0
 pycocotools==2.0.0
-Pillow==8.0.1
+Pillow==8.1.1
diff --git a/pytorch_object_detection/ssd/plot_curve.py b/pytorch_object_detection/ssd/plot_curve.py
index 1ca3653d4..cee7880ac 100644
--- a/pytorch_object_detection/ssd/plot_curve.py
+++ b/pytorch_object_detection/ssd/plot_curve.py
@@ -6,7 +6,7 @@ def plot_loss_and_lr(train_loss, learning_rate):
     x = list(range(len(train_loss)))
     fig, ax1 = plt.subplots(1, 1)
     ax1.plot(x, train_loss, 'r', label='loss')
-    ax1.set_xlabel("step")
+    ax1.set_xlabel("epoch")
     ax1.set_ylabel("loss")
     ax1.set_title("Train Loss and lr")
     plt.legend(loc='best')
diff --git a/pytorch_object_detection/ssd/predict_test.py b/pytorch_object_detection/ssd/predict_test.py
index 4d813c0bf..59c4a0870 100644
--- a/pytorch_object_detection/ssd/predict_test.py
+++ b/pytorch_object_detection/ssd/predict_test.py
@@ -37,9 +37,6 @@ def main():
     model.load_state_dict(train_weights_dict, strict=False)
     model.to(device)
 
-    # initial model
-    init_img = torch.zeros((1, 3, 300, 300), device=device)
-    model(init_img)
 
     # read class_indict
     json_path = "./pascal_voc_classes.json"
@@ -61,6 +58,10 @@ def main():
 
     model.eval()
     with torch.no_grad():
+        # initial model
+        init_img = torch.zeros((1, 3, 300, 300), device=device)
+        model(init_img)
+
         time_start = time_synchronized()
         predictions = model(img.to(device))[0]  # bboxes_out, labels_out, scores_out
         time_end = time_synchronized()
diff --git a/pytorch_object_detection/ssd/requirements.txt b/pytorch_object_detection/ssd/requirements.txt
index fd181036d..3a034bdfd 100644
--- a/pytorch_object_detection/ssd/requirements.txt
+++ b/pytorch_object_detection/ssd/requirements.txt
@@ -5,4 +5,4 @@ pycocotools==2.0.0
 torch==1.6.0
 torchvision==0.7.0
 lxml==4.6.2
-Pillow==8.0.1
+Pillow==8.1.1
diff --git a/pytorch_object_detection/ssd/src/utils.py b/pytorch_object_detection/ssd/src/utils.py
index e934f3df3..a3bc23779 100644
--- a/pytorch_object_detection/ssd/src/utils.py
+++ b/pytorch_object_detection/ssd/src/utils.py
@@ -590,13 +590,15 @@ def decode_single_new(self, bboxes_in, scores_in, criteria, num_output):
 
         # remove low scoring boxes
         # remove low-probability objects, self.scores_thresh=0.05
-        inds = torch.nonzero(scores_in > 0.05).squeeze(1)
+        # inds = torch.nonzero(scores_in > 0.05).squeeze(1)
+        inds = torch.where(torch.gt(scores_in, 0.05))[0]
         bboxes_in, scores_in, labels = bboxes_in[inds, :], scores_in[inds], labels[inds]
 
         # remove empty boxes
         ws, hs = bboxes_in[:, 2] - bboxes_in[:, 0], bboxes_in[:, 3] - bboxes_in[:, 1]
         keep = (ws >= 1 / 300) & (hs >= 1 / 300)
-        keep = keep.nonzero().squeeze(1)
+        # keep = keep.nonzero().squeeze(1)
+        keep = torch.where(keep)[0]
         bboxes_in, scores_in, labels = bboxes_in[keep], scores_in[keep], labels[keep]
 
         # non-maximum suppression
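
The src/utils.py hunk above swaps the deprecated `tensor.nonzero().squeeze(1)` idiom for `torch.where(condition)[0]`: on PyTorch 1.5+, calling `nonzero()` without `as_tuple` emits a deprecation warning, and `torch.where` with a single condition argument is equivalent to `torch.nonzero(condition, as_tuple=True)`. A minimal standalone sketch of the equivalence (the score values below are made up, not from the repo):

```python
import torch

# Illustrative 1-D score tensor, standing in for scores_in in decode_single_new.
scores = torch.tensor([0.01, 0.30, 0.04, 0.90])

# Old form: warns on PyTorch >= 1.5 because nonzero() is called without as_tuple.
old_inds = torch.nonzero(scores > 0.05).squeeze(1)

# New form: torch.where(cond) returns a tuple of index tensors, one per
# dimension; [0] selects the indices along the only dimension.
new_inds = torch.where(torch.gt(scores, 0.05))[0]

assert torch.equal(old_inds, new_inds)  # both are tensor([1, 3])
```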
diff --git a/pytorch_object_detection/ssd/train_ssd300.py b/pytorch_object_detection/ssd/train_ssd300.py
index 8375432d6..35676ca5b 100644
--- a/pytorch_object_detection/ssd/train_ssd300.py
+++ b/pytorch_object_detection/ssd/train_ssd300.py
@@ -1,4 +1,5 @@
 import os
+import datetime
 
 import torch
 
@@ -45,6 +46,8 @@ def main(parser_data):
     if not os.path.exists("save_weights"):
         os.mkdir("save_weights")
 
+    results_file = "results{}.txt".format(datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
+
     data_transform = {
         "train": transform.Compose([transform.SSDCropping(),
                                     transform.Resize(),
@@ -113,16 +116,26 @@ def main(parser_data):
     # load the validation set in advance so it is not reloaded on every evaluation, which saves time
     val_data = get_coco_api_from_dataset(val_data_loader.dataset)
     for epoch in range(parser_data.start_epoch, parser_data.epochs):
-        utils.train_one_epoch(model=model, optimizer=optimizer,
-                              data_loader=train_data_loader,
-                              device=device, epoch=epoch,
-                              print_freq=50, train_loss=train_loss,
-                              train_lr=learning_rate)
-
+        mean_loss, lr = utils.train_one_epoch(model=model, optimizer=optimizer,
+                                              data_loader=train_data_loader,
+                                              device=device, epoch=epoch,
+                                              print_freq=50)
+        train_loss.append(mean_loss.item())
+        learning_rate.append(lr)
+
+        # update learning rate
         lr_scheduler.step()
 
-        utils.evaluate(model=model, data_loader=val_data_loader,
-                       device=device, data_set=val_data, mAP_list=val_map)
+        coco_info = utils.evaluate(model=model, data_loader=val_data_loader,
+                                   device=device, data_set=val_data)
+
+        # write into txt
+        with open(results_file, "a") as f:
+            result_info = [str(round(i, 4)) for i in coco_info + [mean_loss.item(), lr]]
+            txt = "epoch:{} {}".format(epoch, ' '.join(result_info))
+            f.write(txt + "\n")
+
+        val_map.append(coco_info[1])  # pascal mAP
 
         # save weights
         save_files = {
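
With this change, train_ssd300.py appends one line per epoch to a timestamped results file: "epoch:N" followed by the 12 COCO bbox metrics returned by evaluate(), then the epoch's mean loss and learning rate, each rounded to four decimals. A hypothetical reader for that format (`parse_results_line` is illustrative and assumes whitespace-separated fields; it is not part of the repo):

```python
def parse_results_line(line: str):
    """Split one results-file line into epoch, COCO stats, mean loss, and lr."""
    fields = line.split()
    epoch = int(fields[0].split(":")[1])     # "epoch:3" -> 3
    values = [float(v) for v in fields[1:]]  # 12 COCO metrics + loss + lr
    return epoch, values[:12], values[12], values[13]

# Example line shaped like the f.write() output above (values are made up):
line = "epoch:0 " + " ".join(["0.1"] * 12) + " 7.9431 0.0005"
epoch, coco_stats, mean_loss, lr = parse_results_line(line)
assert epoch == 0 and len(coco_stats) == 12 and lr == 0.0005
```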
diff --git a/pytorch_object_detection/ssd/train_utils/train_eval_utils.py b/pytorch_object_detection/ssd/train_utils/train_eval_utils.py
index dd07006c4..fcb750e81 100644
--- a/pytorch_object_detection/ssd/train_utils/train_eval_utils.py
+++ b/pytorch_object_detection/ssd/train_utils/train_eval_utils.py
@@ -10,8 +10,8 @@
 import train_utils.distributed_utils as utils
 
 
-def train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq,
-                    train_loss=None, train_lr=None, warmup=False):
+def train_one_epoch(model, optimizer, data_loader, device, epoch,
+                    print_freq=50, warmup=False):
     model.train()
     metric_logger = utils.MetricLogger(delimiter="  ")
     metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
@@ -24,7 +24,8 @@
 
         lr_scheduler = utils.warmup_lr_scheduler(optimizer, warmup_iters, warmup_factor)
 
-    for images, targets in metric_logger.log_every(data_loader, print_freq, header):
+    mloss = torch.zeros(1).to(device)  # mean losses
+    for i, [images, targets] in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
         # batch inputs information
         images = torch.stack(images, dim=0)
 
@@ -49,10 +50,9 @@
 
         losses_dict_reduced = utils.reduce_dict(losses_dict)
         losses_reduce = losses_dict_reduced["total_losses"]
-        loss_value = losses_reduce.item()
-        if isinstance(train_loss, list):
-            # record training loss
-            train_loss.append(loss_value)
+        loss_value = losses_reduce.detach()
+        # record training loss
+        mloss = (mloss * i + loss_value) / (i + 1)  # update mean losses
 
         if not math.isfinite(loss_value):  # stop training when the computed loss is infinite
            print("Loss is {}, stopping training".format(loss_value))
@@ -70,12 +70,12 @@
         metric_logger.update(**losses_dict_reduced)
         now_lr = optimizer.param_groups[0]["lr"]
         metric_logger.update(lr=now_lr)
-        if isinstance(train_lr, list):
-            train_lr.append(now_lr)
+
+    return mloss, now_lr
 
 
 @torch.no_grad()
-def evaluate(model, data_loader, device, data_set=None, mAP_list=None):
+def evaluate(model, data_loader, device, data_set=None):
     n_threads = torch.get_num_threads()
     # FIXME remove this and make paste_masks_in_image run on the GPU
     torch.set_num_threads(1)
@@ -138,12 +138,9 @@
     coco_evaluator.summarize()
     torch.set_num_threads(n_threads)
 
-    print_txt = coco_evaluator.coco_eval[iou_types[0]].stats
-    coco_mAP = print_txt[0]
-    voc_mAP = print_txt[1]
-    if isinstance(mAP_list, list):
-        mAP_list.append(voc_mAP)
-    # return coco_evaluator
+    coco_info = coco_evaluator.coco_eval[iou_types[0]].stats.tolist()  # numpy to list
+
+    return coco_info
 
 
 def _get_iou_types(model):
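
The train_eval_utils.py change replaces the mutable train_loss list with an on-device running mean, `mloss = (mloss * i + loss_value) / (i + 1)`: after the batch with 0-based index i, mloss equals the arithmetic mean of the first i + 1 batch losses, which is the per-epoch value train_ssd300.py now records. A quick standalone check of the recurrence (loss values are made up):

```python
import torch

losses = [torch.tensor(4.0), torch.tensor(2.0), torch.tensor(3.0)]

mloss = torch.zeros(1)  # running mean, same shape as in train_one_epoch
for i, loss_value in enumerate(losses):
    mloss = (mloss * i + loss_value) / (i + 1)  # update mean losses

assert torch.allclose(mloss, torch.tensor([3.0]))  # (4 + 2 + 3) / 3
```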