forked from hanzhanggit/StackGAN
-
Notifications
You must be signed in to change notification settings - Fork 0
/
preprocess_birds.py
101 lines (85 loc) · 3.2 KB
/
preprocess_birds.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# import tensorflow as tf
import numpy as np
import os
import pickle
from misc.utils import get_image
import scipy.misc
import pandas as pd
# from glob import glob
# TODO: 1. current label is temporary, need to change according to real label
# 2. Current, only split the data into train, need to handel train, test
# Down-scaling factor between high-res and low-res images.
# NOTE(review): name looks like a typo for "RATIO"; kept as-is since other
# modules may import it by this name.
LR_HR_RETIO = 4
# Target side length (pixels) of the high-resolution images.
IMSIZE = 256
# Images are loaded at 76/64 of IMSIZE (= 304 for IMSIZE 256) — presumably
# to leave margin for cropping/augmentation downstream; TODO confirm.
LOAD_SIZE = int(IMSIZE * 76 / 64)
# Root directory of the CUB birds dataset on disk.
BIRD_DIR = 'Data/birds'
def load_filenames(data_dir):
    """Load the pickled list of image filenames stored under *data_dir*.

    Args:
        data_dir: Directory containing 'filenames.pickle'. A trailing path
            separator is optional.

    Returns:
        The unpickled object (expected: list of filename strings).
    """
    # BUG FIX: the original used raw string concatenation
    # (data_dir + 'filenames.pickle'), which silently produces a wrong path
    # when the caller omits the trailing separator. os.path.join handles
    # both cases and yields the identical path for the original callers.
    filepath = os.path.join(data_dir, 'filenames.pickle')
    with open(filepath, 'rb') as f:
        filenames = pickle.load(f)
    print('Load filenames from: %s (%d)' % (filepath, len(filenames)))
    return filenames
def load_bbox(data_dir):
    """Map each image key (relative path, no extension) to its bounding box.

    Reads the standard CUB_200_2011 metadata files:
      - bounding_boxes.txt: '<id> <x-left> <y-top> <width> <height>' per line
      - images.txt:         '<id> <relative/path/to/image.jpg>' per line

    Args:
        data_dir: Root directory containing the 'CUB_200_2011' folder.

    Returns:
        dict mapping 'class_dir/image_name' -> [x-left, y-top, width, height]
        as ints.
    """
    bbox_path = os.path.join(data_dir, 'CUB_200_2011/bounding_boxes.txt')
    # sep=r'\s+' replaces the deprecated delim_whitespace=True (removed in
    # recent pandas); behavior is identical for whitespace-delimited files.
    df_bounding_boxes = pd.read_csv(bbox_path,
                                    sep=r'\s+',
                                    header=None).astype(int)
    filepath = os.path.join(data_dir, 'CUB_200_2011/images.txt')
    df_filenames = pd.read_csv(filepath, sep=r'\s+', header=None)
    filenames = df_filenames[1].tolist()
    print('Total filenames: ', len(filenames), filenames[0])
    filename_bbox = {}
    # BUG FIX: the original used Python-2-only xrange(), which raises
    # NameError under Python 3; range() behaves identically here.
    for i in range(len(filenames)):
        # Column 0 is the image id; columns 1..4 are the bbox.
        bbox = df_bounding_boxes.iloc[i][1:].tolist()
        key = filenames[i][:-4]  # strip the '.jpg' extension
        filename_bbox[key] = bbox
    return filename_bbox
def save_data_list(inpath, outpath, filenames, filename_bbox):
    """Crop/resize each listed image at HR and LR scales and pickle both lists.

    Args:
        inpath: Dataset root containing 'CUB_200_2011/images/'.
        outpath: Output path prefix; the pickle name is appended directly,
            producing e.g. '<outpath>304images.pickle' and
            '<outpath>76images.pickle'.
        filenames: Iterable of image keys (relative path, no extension).
        filename_bbox: Mapping key -> [x-left, y-top, width, height].

    Side effects:
        Writes two pickle files (uint8 HR images and bicubic-downscaled LR
        images) and prints progress.
    """
    hr_images = []
    lr_images = []
    lr_size = int(LOAD_SIZE / LR_HR_RETIO)
    cnt = 0
    for key in filenames:
        bbox = filename_bbox[key]
        f_name = '%s/CUB_200_2011/images/%s.jpg' % (inpath, key)
        img = get_image(f_name, LOAD_SIZE, is_crop=True, bbox=bbox)
        img = img.astype('uint8')
        hr_images.append(img)
        # NOTE(review): scipy.misc.imresize was removed in SciPy 1.3; this
        # code requires an old SciPy (with Pillow installed) — verify the
        # target environment before running.
        lr_img = scipy.misc.imresize(img, [lr_size, lr_size], 'bicubic')
        lr_images.append(lr_img)
        cnt += 1
        if cnt % 100 == 0:
            print('Load %d......' % cnt)
    # BUG FIX: guard the summary print — the original indexed hr_images[0]
    # unconditionally and raised IndexError on an empty filename list.
    if hr_images:
        print('images', len(hr_images), hr_images[0].shape, lr_images[0].shape)
    outfile = outpath + str(LOAD_SIZE) + 'images.pickle'
    with open(outfile, 'wb') as f_out:
        pickle.dump(hr_images, f_out)
        print('save to: ', outfile)
    outfile = outpath + str(lr_size) + 'images.pickle'
    with open(outfile, 'wb') as f_out:
        pickle.dump(lr_images, f_out)
        print('save to: ', outfile)
def convert_birds_dataset_pickle(inpath):
    """Preprocess the CUB birds dataset rooted at *inpath* into image pickles.

    Loads the shared bounding-box table once, then processes the train and
    test splits in turn, writing the HR/LR image pickles into each split's
    directory.
    """
    # Bounding boxes are common to both splits — load them a single time.
    filename_bbox = load_bbox(inpath)
    for split in ('train', 'test'):
        split_dir = os.path.join(inpath, split + '/')
        split_filenames = load_filenames(split_dir)
        save_data_list(inpath, split_dir, split_filenames, filename_bbox)
if __name__ == '__main__':
    # Script entry point: preprocess the CUB birds dataset under BIRD_DIR.
    convert_birds_dataset_pickle(BIRD_DIR)