# example_evoart.py
from tensorgp.engine import *
# NIMA classifier imports
from keras.models import Model
from keras.layers import Dense, Dropout
from keras.applications.mobilenet import MobileNet
from keras.applications.mobilenet import preprocess_input as preprocess_input_mob
from utils.score_utils import mean_score, std_score
# Fitness assessment with the NIMA classifier
# https://github.com/idealo/image-quality-assessment
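# TensorGP calls this function once per generation with the current population,
# the rendered image tensors and run parameters passed as keyword arguments;
# it must set each individual's 'fitness' and return the updated population
# together with the index of the best individual.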
def nima_classifier(**kwargs):
    # read parameters
    population = kwargs.get('population')
    generation = kwargs.get('generation')
    tensors = kwargs.get('tensors')
    f_path = kwargs.get('f_path')
    objective = kwargs.get('objective')
    _resolution = kwargs.get('resolution')
    _stf = kwargs.get('stf')

    images = True
    fn = f_path + "gen" + str(generation).zfill(5)
    fitness = []
    best_ind = 0

    # set objective function according to min/max
    fit = 0
    if objective == 'minimizing':
        condition = lambda: (fit < max_fit)  # minimizing
        max_fit = float('inf')
    else:
        condition = lambda: (fit > max_fit)  # maximizing
        max_fit = float('-inf')

    number_tensors = len(tensors)
    with tf.device('/GPU:0'):

        # NIMA classifier [inputs in range 0 255]
        x = np.stack([tensors[index].numpy() for index in range(number_tensors)], axis=0)
        x = preprocess_input_mob(x)
        scores = model.predict(x, batch_size=number_tensors, verbose=0)

        # scores
        for index in range(number_tensors):
            #if generation % _stf == 0:
            #    save_image(tensors[index], index, fn, 3)  # save image

            mean = mean_score(scores[index])
            std = std_score(scores[index])
            # fit = mean - std
            fit = mean

            if condition():
                max_fit = fit
                best_ind = index

            fitness.append(fit)
            population[index]['fitness'] = fit

    return population, best_ind
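
# A minimal standalone sketch (not part of the original example, and never called
# by the evolutionary run below): score a single 224x224 RGB image, with values in
# the 0-255 range, using an already-built and loaded NIMA model such as the one
# assembled in the __main__ block.
def score_single_image(img, nima_model):
    x = preprocess_input_mob(np.expand_dims(img, axis=0).astype('float32'))
    scores = nima_model.predict(x, batch_size=1, verbose=0)
    return mean_score(scores[0]), std_score(scores[0])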
if __name__ == "__main__":

    # NIMA expects 224 by 224 pixel images; the remaining 3 are the RGB color channels
    resolution = [224, 224, 3]

    # GP params
    dev = '/gpu:0'  # device to run on, use '/cpu:0' to run on the CPU
    number_generations = 5
    pop_size = 10
    tour_size = 3
    mut_prob = 0.1
    cross_prob = 0.9
    max_tree_dep = 15

    # tell the engine that the RGB channels are not explicitly part of the terminal set
    edims = 2

    # Initialize the NIMA classifier (weights from the idealo/image-quality-assessment
    # repository linked at the top of this file)
    base_model = MobileNet((None, None, 3), alpha=1, include_top=False, pooling='avg', weights=None)
    x = Dropout(0)(base_model.output)
    x = Dense(10, activation='softmax')(x)
    model = Model(base_model.input, x)
    model.load_weights('weights/weights_mobilenet_aesthetic_0.07.hdf5')

    #seed = random.randint(0, 0x7fffffff)
    seed = 2020  # reproducibility

    # create engine
    engine = Engine(fitness_func=nima_classifier,
                    population_size=pop_size,
                    tournament_size=tour_size,
                    mutation_rate=mut_prob,
                    crossover_rate=cross_prob,
                    max_tree_depth=max_tree_dep,
                    target_dims=resolution,
                    method='ramped half-and-half',
                    objective='maximizing',
                    device=dev,
                    stop_criteria='generation',
                    stop_value=number_generations,
                    domain=[-1, 1],
                    codomain=[0, 1],
                    do_final_transform=True,
                    effective_dims=edims,
                    seed=seed,
                    debug=0,
                    save_to_file=1,  # interval (in generations) at which images are saved to file
                    save_graphics=True,
                    show_graphics=False,
                    read_init_pop_from_file=None)

    # This experiment is comparatively slow; bear in mind that the NIMA classifier
    # itself accounts for a considerable amount of the run time.

    # run evolutionary process
    engine.run()