2024-06-10 23:50 commit in Ning
sober-orange committed Jun 10, 2024
1 parent 769e6e6 commit cec270d
Showing 5 changed files with 153 additions and 2 deletions.
63 changes: 63 additions & 0 deletions configs/stgcn++/stgcn++_ntu120_xsub_hrnet/jnew.py
@@ -0,0 +1,63 @@
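# STGCN++ joint-stream ('j') config adapted to the 10-class Weizmann skeleton dataset
# (COCO-layout 2D keypoints; the cross-subject-style splits are named xsub_train / xsub_val).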
model = dict(
    type='RecognizerGCN',
    backbone=dict(
        type='STGCN',
        gcn_adaptive='init',
        gcn_with_res=True,
        tcn_type='mstcn',
        graph_cfg=dict(layout='coco', mode='spatial')),
    cls_head=dict(type='GCNHead', num_classes=10, in_channels=256))

dataset_type = 'PoseDataset'
ann_file = 'tools/data/Weizman/Wei_xsub_stgn++.pkl'
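# The three pipelines differ only in sampling: normalize the 2D keypoints, build joint
# features, uniformly sample 100 frames per clip (1 clip for train/val, 10 for test) and
# format the input for up to two skeletons per sample (num_person=2).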
train_pipeline = [
    dict(type='PreNormalize2D'),
    dict(type='GenSkeFeat', dataset='coco', feats=['j']),
    dict(type='UniformSample', clip_len=100),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['keypoint'])
]
val_pipeline = [
    dict(type='PreNormalize2D'),
    dict(type='GenSkeFeat', dataset='coco', feats=['j']),
    dict(type='UniformSample', clip_len=100, num_clips=1),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['keypoint'])
]
test_pipeline = [
    dict(type='PreNormalize2D'),
    dict(type='GenSkeFeat', dataset='coco', feats=['j']),
    dict(type='UniformSample', clip_len=100, num_clips=10),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=2),
    dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['keypoint'])
]
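# 16 sequences per GPU; RepeatDataset(times=5) makes one epoch iterate over the small
# Weizmann training split five times.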
data = dict(
    videos_per_gpu=16,
    workers_per_gpu=2,
    test_dataloader=dict(videos_per_gpu=1),
    train=dict(
        type='RepeatDataset',
        times=5,
        dataset=dict(type=dataset_type, ann_file=ann_file, pipeline=train_pipeline, split='xsub_train')),
    val=dict(type=dataset_type, ann_file=ann_file, pipeline=val_pipeline, split='xsub_val'),
    test=dict(type=dataset_type, ann_file=ann_file, pipeline=test_pipeline, split='xsub_val'))

# optimizer
optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0, by_epoch=False)
total_epochs = 16
checkpoint_config = dict(interval=1)
evaluation = dict(interval=1, metrics=['top_k_accuracy'])
log_config = dict(interval=100, hooks=[dict(type='TextLoggerHook')])

# runtime settings
log_level = 'INFO'
work_dir = './work_dirs/stgcn++/stgcn++_ntu120_xsub_hrnet/jnew'
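# A minimal sketch of how such a config is typically launched with pyskl's standard entry
# points; the exact commands are an assumption, not part of this commit:
#   bash tools/dist_train.sh configs/stgcn++/stgcn++_ntu120_xsub_hrnet/jnew.py 1 --validate
#   bash tools/dist_test.sh configs/stgcn++/stgcn++_ntu120_xsub_hrnet/jnew.py <checkpoint> 1 --eval top_k_accuracy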
78 changes: 78 additions & 0 deletions tools/data/Weizman/genjson.py
@@ -0,0 +1,78 @@
import os
import decord
import json
from mmcv import load, dump

from pyskl.smp import mwlines


def writeJson(path_train, jsonpath):
    """Dump one {vid_name, label, start_frame, end_frame} record per .avi under path_train.

    The label is parsed from the file name, which is assumed to follow the
    '<subject>_<label>.avi' pattern of the Weizmann clips used here.
    """
    output_list = []
    trainfile_list = os.listdir(path_train)
    for train_name in trainfile_list:
        traindit = {}
        sp = train_name.split('_')
        traindit['vid_name'] = train_name.replace('.avi', '')
        traindit['label'] = int(sp[1].replace('.avi', ''))
        traindit['start_frame'] = 0

        video_path = os.path.join(path_train, train_name)
        vid = decord.VideoReader(video_path)
        traindit['end_frame'] = len(vid)
        output_list.append(traindit.copy())
    with open(jsonpath, 'w') as outfile:
        json.dump(output_list, outfile)


# dirpath is the dataset root, e.g. dirpath = '../data/Weizmann'
# name is the file name of the generated list; here it is 'Weizmann'
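# Each output line is '<split dir>/<video file> <label>', train entries first, then test.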
def writeList(dirpath, name):
    path_train = os.path.join(dirpath, 'train')
    path_test = os.path.join(dirpath, 'test')
    trainfile_list = os.listdir(path_train)
    testfile_list = os.listdir(path_test)

    train = []
    for train_name in trainfile_list:
        traindit = {}
        sp = train_name.split('_')

        traindit['vid_name'] = train_name
        traindit['label'] = sp[1].replace('.avi', '')
        train.append(traindit)
    test = []
    for test_name in testfile_list:
        testdit = {}
        sp = test_name.split('_')
        testdit['vid_name'] = test_name
        testdit['label'] = sp[1].replace('.avi', '')
        test.append(testdit)

    tmpl1 = os.path.join(path_train, '{}')
    lines1 = [(tmpl1 + ' {}').format(x['vid_name'], x['label']) for x in train]

    tmpl2 = os.path.join(path_test, '{}')
    lines2 = [(tmpl2 + ' {}').format(x['vid_name'], x['label']) for x in test]
    lines = lines1 + lines2
    mwlines(lines, os.path.join(dirpath, name))


def traintest(dirpath, pklname, newpklname):
    """Merge the pose annotations in pklname with the train/test split.

    The vid_name lists from train.json / test.json become the xsub_train / xsub_val
    split, and the combined dict is written to newpklname, the file referenced as
    ann_file in the jnew.py config above.
    """
    os.chdir(dirpath)
    train = load('train.json')
    test = load('test.json')
    annotations = load(pklname)
    split = dict()
    split['xsub_train'] = [x['vid_name'] for x in train]
    split['xsub_val'] = [x['vid_name'] for x in test]
    dump(dict(split=split, annotations=annotations), newpklname)


if __name__ == '__main__':
    # Assumed order of use: run writeJson on the 'train' and 'test' folders first
    # (the commented-out calls below), then traintest() to build the final pickle.
    dirpath = './'
    pklname = 'train.pkl'
    newpklname = 'Wei_xsub_stgn++.pkl'
    # writeJson('test', 'test.json')
    # writeJson('train', 'train.json')
    traintest(dirpath, pklname, newpklname)
    # writeList('./', 'Weizmann.list')
2 changes: 1 addition & 1 deletion tools/data/custom_2d_skeleton.py
@@ -137,7 +137,7 @@ def main():
        my_part = annos
        os.makedirs(args.tmpdir, exist_ok=True)
    else:
-       init_dist('pytorch', backend='nccl')
+       init_dist('pytorch', backend='gloo')
        rank, world_size = get_dist_info()
        if rank == 0:
            os.makedirs(args.tmpdir, exist_ok=True)
10 changes: 10 additions & 0 deletions tools/data/label_map/Weizman.txt
@@ -0,0 +1,10 @@
wave2
bend
jack
jump
pjump
run
side
skip
walk
wave1
2 changes: 1 addition & 1 deletion tools/test.py
@@ -132,7 +132,7 @@ def main():
    cfg.data.test.test_mode = True

    if not hasattr(cfg, 'dist_params'):
-       cfg.dist_params = dict(backend='nccl')
+       cfg.dist_params = dict(backend='gloo')

    init_dist(args.launcher, **cfg.dist_params)
    rank, world_size = get_dist_info()