forked from gnebehay/VOTR
Added python scripts for running trackers on a dataset
Showing 5 changed files with 348 additions and 0 deletions.
@@ -0,0 +1 @@
/home/georg/Dropbox/common/dotfiles/.cv
@@ -0,0 +1,85 @@
import numpy as np

# overlap(T1, T2) calculates the overlap (intersection over union) between
# trajectories T1 and T2, where T1 and T2 are matrices of size N1 x 4 and
# N2 x 4, and the columns of each matrix give the x and y coordinates of the
# upper-left corner as well as the width and height of the bounding box.

def overlap(T1, T2):

    #Check for equal length
    if not T1.shape[0] == T2.shape[0]:
        raise Exception('Number of entries is inconsistent')

    hrzInt = np.minimum(T1[:, 0] + T1[:, 2], T2[:, 0] + T2[:, 2]) - np.maximum(T1[:, 0], T2[:, 0])
    hrzInt = np.maximum(0, hrzInt)
    vrtInt = np.minimum(T1[:, 1] + T1[:, 3], T2[:, 1] + T2[:, 3]) - np.maximum(T1[:, 1], T2[:, 1])
    vrtInt = np.maximum(0, vrtInt)
    intersection = hrzInt * vrtInt

    union = (T1[:, 2] * T1[:, 3]) + (T2[:, 2] * T2[:, 3]) - intersection

    overlap = intersection / union

    return overlap

#Bottom-right corners of the bounding boxes
def br(bbs):

    result = np.hstack((bbs[:, [0]] + bbs[:, [2]] - 1, bbs[:, [1]] + bbs[:, [3]] - 1))

    return result

#Top-left corners of the bounding boxes
def tl(bbs):

    result = bbs[:, :2]

    return result

#Convert corner points (x_min, y_min, x_max, y_max) to boxes (x, y, width, height)
def pts2bb(pts):

    bbs = np.hstack((pts[:, :2], pts[:, 2:4] - pts[:, :2] + 1))

    return bbs

#Convert boxes (x, y, width, height) to corner points (x_min, y_min, x_max, y_max)
def bb2pts(bbs):

    pts = np.hstack((bbs[:, :2], br(bbs)))

    return pts

#Convert boxes to 8-value polygons (four vertices)
def bb2poly(bbs):

    return pts2poly(bb2pts(bbs))

#Expand corner points to the four polygon vertices
def pts2poly(pts):

    x_min = pts[:, [0]]
    y_min = pts[:, [1]]
    x_max = pts[:, [2]]
    y_max = pts[:, [3]]

    poly = np.hstack((x_min, y_min, x_max, y_min, x_max, y_max, x_min, y_max))

    return poly

#Axis-aligned bounding box of a polygon
def poly2bb(poly):
    x_coords = poly[:, ::2]
    y_coords = poly[:, 1::2]

    min_x = np.min(x_coords, axis=1)
    min_y = np.min(y_coords, axis=1)
    max_x = np.max(x_coords, axis=1)
    max_y = np.max(y_coords, axis=1)

    A = np.vstack((min_x, min_y, max_x, max_y)).T

    A = pts2bb(A)

    return A

#Write bounding boxes as comma-separated values, one line per frame
def write(fname, bbs):
    np.savetxt(fname, bbs, fmt='%.2f', delimiter=',')

#Read comma-separated bounding boxes
def read(fname):
    bbs = np.genfromtxt(fname, delimiter=',')
    return bbs
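The helpers above all operate on N x 4 NumPy arrays. A minimal usage sketch, assuming the file is saved as cvbb.py (the name the runner script below imports); the numbers are made up for illustration:

import numpy as np

import cvbb

#Two single-frame trajectories in (x, y, width, height) format
T1 = np.array([[10.0, 10.0, 20.0, 20.0]])
T2 = np.array([[20.0, 20.0, 20.0, 20.0]])

#Per-frame intersection over union: 100 / (400 + 400 - 100), about 0.143
print(cvbb.overlap(T1, T2))

#Round trip between box and polygon representations
poly = cvbb.bb2poly(T1)      #vertices (10,10), (29,10), (29,29), (10,29)
print(cvbb.poly2bb(poly))    #recovers [[10. 10. 20. 20.]]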
@@ -0,0 +1,123 @@
import json
import numpy as np
import os
import cv2

def load_sequences():

    #Load root dir entry from file
    seq_file = os.path.join(os.path.expanduser("~"), '.cv', 'dataset')

    with open(seq_file) as f:
        seq_json = json.load(f)
        top_dir = seq_json['root_dir']

    print('Listing sequences in directory ' + top_dir)

    if os.path.exists(os.path.join(top_dir, 'datasets')):
        datasets = True
    else:
        datasets = False

    sequences = {}

    if datasets:
        #Top level directories are dataset names
        dirs = [d for d in os.listdir(top_dir) if os.path.isdir(os.path.join(top_dir, d))]
    else:
        dirs = ['.']

    for dataset in dirs:
        dataset_dir = os.path.join(top_dir, dataset)
        seq_dirs = [d for d in os.listdir(dataset_dir) if os.path.isdir(os.path.join(top_dir, dataset, d))]
        for seq_name in seq_dirs:
            directory = os.path.join(dataset_dir, seq_name)
            seq = Sequence()
            seq.dataset = dataset
            seq.directory = directory
            seq.name = seq_name
            if datasets:
                seq.identifier = dataset + '.' + seq_name
            else:
                seq.identifier = seq_name
            sequences[seq.identifier] = seq
            #Find groundtruth file
            gt_file = os.path.join(directory, 'groundtruth.txt')
            if os.path.exists(gt_file):
                #Read groundtruth
                seq.gt = np.genfromtxt(gt_file, delimiter=',')
            else:
                seq.gt = None
                print('Warning: sequence ' + seq.identifier + " doesn't have a groundtruth file.")

            seq.load()

    return sequences

def load_selection(selection_file):

    with open(selection_file) as f:
        selected_sequences = [l.strip() for l in f.readlines()]

    seqs = [seq for seq in load_sequences().values() if seq.identifier in selected_sequences]

    return seqs

class Sequence:

    def __init__(self):
        self.dataset = None
        self.gt = None
        self.im_list = None
        self.name = None
        self.num_frames = None
        self.directory = None
        self.identifier = None

    def load(self):

        if self.im_list is not None:
            return

        print('Loading sequence ' + self.identifier)

        first_file = os.path.join(self.directory, '00000001.jpg')

        #Test for file extension
        if os.path.exists(first_file):
            file_ext = '.jpg'
        else:
            file_ext = '.png'
            first_file = os.path.join(self.directory, '00000001.png')

        im = cv2.imread(first_file)

        self.shape = im.shape

        #Create list of images
        im_list = []

        MAX_IM = 10000000

        for i in xrange(1, MAX_IM):
            im_file = '{0:08d}{1}'.format(i, file_ext)
            im_path = os.path.join(self.directory, im_file)
            if os.path.exists(im_path):
                im_list.append(im_path)
            else:
                break

        self.im_list = im_list

        #Compute number of frames
        self.num_frames = len(im_list)

        if self.gt is not None:
            gt_len = self.gt.shape[0]

            if gt_len != self.num_frames:
                raise Exception('Number of entries in groundtruth file differs from number of frames in sequence ' + self.identifier + '.')
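load_sequences() reads the JSON file ~/.cv/dataset and expects a 'root_dir' key; each sequence is a directory of zero-padded .jpg or .png frames with an optional groundtruth.txt. A sketch of a matching setup, with all paths and sequence names chosen purely for illustration:

import json
import os

#Illustrative only: root_dir and the dataset/sequence names below are assumptions
cv_dir = os.path.join(os.path.expanduser('~'), '.cv')
if not os.path.isdir(cv_dir):
    os.makedirs(cv_dir)

#~/.cv/dataset points the loader at the top-level sequence directory
with open(os.path.join(cv_dir, 'dataset'), 'w') as f:
    json.dump({'root_dir': '/data/sequences'}, f)

#Expected layout under root_dir. If an entry named 'datasets' exists at the
#top level, the first directory level is treated as the dataset name and
#sequences are identified as '<dataset>.<sequence>':
#
#  /data/sequences/datasets                        (marker)
#  /data/sequences/vot2013/cup/00000001.jpg ...    8-digit frame names
#  /data/sequences/vot2013/cup/groundtruth.txt     comma-separated x,y,w,h per frame
#
#Without the marker, sequence directories sit directly under root_dir and are
#identified by their directory name alone.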
@@ -0,0 +1,130 @@
import cvbb
import json
import os
import subprocess
import tempfile
import time

def load_trackers():

    #Load root dir entry from file
    tracker_file = os.path.join(os.path.expanduser("~"), '.cv', 'trackers')

    with open(tracker_file) as f:
        trackers_json = json.load(f)
        tracker_names = trackers_json.keys()

    trackers = {}

    for t in tracker_names:
        tracker = Tracker()
        tracker.name = t

        track_obj = trackers_json[t]
        tracker.command = track_obj['command']
        if 'blacklist' in track_obj:
            tracker.blacklist = track_obj['blacklist']
        else:
            tracker.blacklist = []
        if 'protocol' in track_obj:
            tracker.protocol = track_obj['protocol']

        trackers[tracker.name] = tracker

    return trackers

def load_selection(selection_file):

    with open(selection_file) as f:
        selection = json.load(f)

    trackers = load_trackers()

    selected_trackers = []

    for t in selection['selection']:

        name = t.keys()[0]

        #Find tracker
        tracker = trackers[name]

        #Rename tracker
        tracker.name = t[name]

        selected_trackers.append(tracker)

    return selected_trackers

class Tracker:

    name = None
    command = None
    protocol = 'BB'

    def run(self, sequence, additional_args=''):

        #Create working directory
        working_dir = tempfile.mkdtemp()

        print 'working dir is', working_dir

        #Save list of images to working directory
        image_file = os.path.join(working_dir, 'images.txt')
        with open(image_file, 'w') as f:
            f.write('\n'.join(sequence.im_list))

        #Save init file to working directory
        init_file = os.path.join(working_dir, 'region.txt')

        if self.protocol == 'BB':
            init_region = sequence.gt[[0], :]
        elif self.protocol == 'POLY':
            init_region = cvbb.bb2poly(sequence.gt[[0], :])
        else:
            raise Exception('Unknown input protocol')

        cvbb.write(init_file, init_region)

        #Remember current directory
        current_dir = os.getcwd()

        #Change to working directory
        os.chdir(working_dir)

        if additional_args == '':
            cmd = self.command
        else:
            cmd = self.command + ' ' + additional_args

        #Run the tracker
        tic = time.time()
        print "Running tracker " + self.name + " on sequence " + sequence.name \
            + " from dataset " + sequence.dataset + " (" + str(sequence.num_frames) + " frames) using command " + cmd
        subprocess.call(cmd, shell=True)
        toc = time.time()
        elapsed_time = toc - tic

        #Change directory back to old directory
        os.chdir(current_dir)

        #Create path to output file
        output_file = os.path.join(working_dir, 'output.txt')

        if not os.path.exists(output_file):
            raise Exception('No output file was generated')

        #Read output file
        results = cvbb.read(output_file)

        if sequence.gt is not None:
            if not results.shape[0] == sequence.gt.shape[0]:
                raise Exception('Number of output frames does not match number of GT frames')

        return results, elapsed_time
if __name__ == '__main__':
    load_trackers()
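load_trackers() reads ~/.cv/trackers, a JSON object keyed by tracker name in which each entry provides a 'command' plus optional 'blacklist' and 'protocol' ('BB' by default, or 'POLY'); load_selection() takes a JSON file whose 'selection' list maps configured names to the names to report under. A sketch of both files and a run; the tracker names, commands and importing module names are assumptions (only the cvbb module name is confirmed by the import above):

import json
import os

#Illustrative only: tracker names and commands are placeholders
cv_dir = os.path.join(os.path.expanduser('~'), '.cv')

#~/.cv/trackers: one entry per tracker; 'protocol' controls how region.txt is
#written ('BB' bounding box, 'POLY' polygon), 'blacklist' is optional
trackers_cfg = {
    'ncc': {'command': 'python /opt/trackers/ncc.py'},
    'cmt': {'command': '/opt/trackers/cmt', 'protocol': 'POLY', 'blacklist': []}
}
with open(os.path.join(cv_dir, 'trackers'), 'w') as f:
    json.dump(trackers_cfg, f)

#Selection file for load_selection(): maps a configured tracker name to the
#name it should be reported under
with open('trackers_selection.json', 'w') as f:
    json.dump({'selection': [{'ncc': 'NCC-baseline'}]}, f)

#A run then looks roughly like this (module names cvtracker/cvsequence assumed):
#
#  import cvsequence, cvtracker
#  tracker = cvtracker.load_selection('trackers_selection.json')[0]
#  seq = cvsequence.load_sequences()['vot2013.cup']
#  results, elapsed = tracker.run(seq)
#
#The tracker command runs in a temporary directory containing images.txt and
#region.txt and must write its results to output.txt in that directory.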