Skip to content

Commit

Permalink
Init
Browse files Browse the repository at this point in the history
create files for the first time
  • Loading branch information
shuangjiexu authored Aug 4, 2017
1 parent 190e353 commit b85dd19
Show file tree
Hide file tree
Showing 9 changed files with 1,482 additions and 0 deletions.
95 changes: 95 additions & 0 deletions data/computeOpticalFlow.m
Original file line number Diff line number Diff line change
@@ -0,0 +1,95 @@
% Copyright (c) 2016 Niall McLaughlin, CSIT, Queen's University Belfast, UK
% Contact: [email protected]
% If you use this code please cite:
% "Recurrent Convolutional Network for Video-based Person Re-Identification",
% N McLaughlin, J Martinez Del Rincon, P Miller,
% IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016
%
% This software is licensed for research and non-commercial use only.
%
% The above copyright notice and this permission notice shall be included in
% all copies or substantial portions of the Software.
%
% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
% IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
% FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
% AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
% LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
% OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
% THE SOFTWARE.

%read an image sequence in the ilids video / PRID dataset
%compute optical flow

rootDir = fullfile('D:','person_re-id','data');

%camera folder names (used by the PRID path variant commented out below)
camName = {'cam_a','cam_b'};

%magnitude threshold: flow magnitudes above this are clipped before saving
ofThreshold = 50;

for person = 1:319
    disp(person)
    for cam = 1:2

        dataDir = fullfile(rootDir,'i-LIDS-VID','sequences',['cam',num2str(cam)],['person',sprintf('%03i',person)]);
        %dataDir = fullfile(rootDir,'PRID2011','multi_shot',camName{cam},['person_',sprintf('%04i',person)]);

        %explicit 'dir' type: bare exist() also matches variables/files on the path
        if exist(dataDir,'dir')

            saveDir = fullfile(rootDir,'i-LIDS-VID-OF-HVP','sequences',['cam',num2str(cam)],['person',sprintf('%03i',person)]);
            %saveDir = fullfile(rootDir,'PRID2011-OF-HVP','multi_shot',camName{cam},['person_',sprintf('%04i',person)]);
            if ~exist(saveDir,'dir')
                mkdir(saveDir);
            end

            %collect the .png frame files (strfind replaces deprecated findstr)
            files = dir(dataDir);
            seqFiles = {};
            for f = 1:length(files)
                if length(files(f).name) > 4 && ~isempty(strfind(files(f).name,'.png'))
                    seqFiles = [seqFiles files(f).name]; %#ok<AGROW>
                end
            end

            %fresh flow estimator per sequence so internal state does not
            %leak across different persons/cameras
            optical = vision.OpticalFlow('Method','Lucas-Kanade','OutputValue', 'Horizontal and vertical components in complex form');

            for f = 1:length(seqFiles)
                seqImg = imread(fullfile(dataDir,seqFiles{f}));
                optFlow = step(optical,double(rgb2gray(seqImg)));

                %separate optFlow into mag and phase components
                R = abs(optFlow);
                theta = angle(optFlow);

                %threshold to remove pixels with large magnitude values
                %(R = abs(...) is non-negative, so only an upper clip is needed;
                %the original max(R,-ofThreshold) was a no-op)
                R = min(R,ofThreshold);

                %convert back to complex form and read H/V from the THRESHOLDED
                %flow Z — the original computed Z but then took imag/real of the
                %raw optFlow, so the threshold never took effect
                Z = R.*exp(1i*theta);
                H = imag(Z) + 127;
                V = real(Z) + 127;

                %pack horizontal/vertical components into an RGB image
                imgDims = size(seqImg);
                tmpImg = zeros(imgDims);
                tmpImg(:,:,1) = H;
                tmpImg(:,:,2) = V;
                tmpImg(:,:,3) = 0;

                %clamp to [0,255] then normalise to [0,1] for imwrite
                tmpImg(tmpImg < 0) = 0;
                tmpImg(tmpImg > 255) = 255;
                tmpImg = tmpImg ./ 255;

                %save optical flow image to file
                saveFile = fullfile(saveDir,seqFiles{f});
                imwrite(tmpImg,saveFile);
            end
        end
    end
end
106 changes: 106 additions & 0 deletions datasets/datasetUtils.lua
Original file line number Diff line number Diff line change
@@ -0,0 +1,106 @@
-- Copyright (c) 2016 Niall McLaughlin, CSIT, Queen's University Belfast, UK
-- Contact: [email protected]
-- If you use this code please cite:
-- "Recurrent Convolutional Network for Video-based Person Re-Identification",
-- N McLaughlin, J Martinez Del Rincon, P Miller,
-- IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016
--
-- This software is licensed for research and non-commercial use only.
--
-- The above copyright notice and this permission notice shall be included in
-- all copies or substantial portions of the Software.
--
-- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-- THE SOFTWARE.

local dataset_utils = {}

--- Randomly partition person identities into train/test sets.
-- The dataset is a table where t[x] contains the images for person x.
-- @param nTotalPersons total number of identities in the dataset
-- @param testTrainSplit fraction (0..1) of identities used for training
-- @return trainInds, testInds -- 1-D torch tensors of shuffled identity indices
function dataset_utils.partitionDataset(nTotalPersons, testTrainSplit)
  local splitPoint = torch.floor(nTotalPersons * testTrainSplit)
  local inds = torch.randperm(nTotalPersons)

  -- fix: declared local — these were accidental globals in the original,
  -- leaking the split into _G; callers receive them via the return values
  local trainInds = inds[{{1, splitPoint}}]
  local testInds = inds[{{splitPoint + 1, nTotalPersons}}]

  print('N train = ' .. trainInds:size(1))
  print('N test = ' .. testInds:size(1))

  return trainInds, testInds
end

-- the dataset format is dataset[person][camera][nSeq][nCrop][FeatureVec]
--- Pick a positive pair: two sequence windows of the SAME person, one per camera.
-- @param dataset the full dataset table
-- @param trainInds torch tensor mapping training slots to identity indices
-- @param person index into trainInds selecting the identity
-- @param sampleSeqLen desired window length in frames
-- @return startA, startB, actualSampleSeqLen
function dataset_utils.getPosSample(dataset,trainInds,person,sampleSeqLen)

    -- iLIDS-VID has exactly two camera views; adjust for other datasets
    local camA, camB = 1, 2

    local id = trainInds[person]
    local lenA = dataset[id][camA]:size(1)
    local lenB = dataset[id][camB]:size(1)

    -- shrink the window to the shorter sequence when either one is too short
    local useLen = sampleSeqLen
    if lenA <= sampleSeqLen or lenB <= sampleSeqLen then
        useLen = math.min(lenA, lenB)
    end

    -- uniform random start frame leaving room for a useLen-frame window
    local startA = torch.floor(torch.rand(1)[1] * ((lenA - useLen) + 1)) + 1
    local startB = torch.floor(torch.rand(1)[1] * ((lenB - useLen) + 1)) + 1

    return startA, startB, useLen
end

-- the dataset format is dataset[person][camera][nSeq][nCrop][FeatureVec]
--- Pick a negative pair: sequence windows from two DIFFERENT people.
-- @param dataset the full dataset table
-- @param trainInds torch tensor of training identity indices
-- @param sampleSeqLen desired window length in frames
-- @return personA, personB, camA, camB, startA, startB, actualSampleSeqLen
function dataset_utils.getNegSample(dataset,trainInds,sampleSeqLen)

    -- shuffle all training slots and take the first two: guaranteed distinct
    local shuffled = torch.randperm(trainInds:size(1))
    local personA = shuffled[1]
    local personB = shuffled[2]

    -- iLIDS-VID has two cameras; draw one independently per person
    local camA = torch.floor(torch.rand(1)[1] * 2) + 1
    local camB = torch.floor(torch.rand(1)[1] * 2) + 1

    local lenA = dataset[trainInds[personA]][camA]:size(1)
    local lenB = dataset[trainInds[personB]][camB]:size(1)

    -- shrink the window to the shorter sequence when either one is too short
    local useLen = sampleSeqLen
    if lenA <= sampleSeqLen or lenB <= sampleSeqLen then
        useLen = math.min(lenA, lenB)
    end

    -- uniform random start frame leaving room for a useLen-frame window
    local startA = torch.floor(torch.rand(1)[1] * ((lenA - useLen) + 1)) + 1
    local startB = torch.floor(torch.rand(1)[1] * ((lenB - useLen) + 1)) + 1

    return personA, personB, camA, camB, startA, startB, useLen
end

return dataset_utils
Loading

0 comments on commit b85dd19

Please sign in to comment.