# srez_main.py (forked from david-gpu/srez)
import srez_demo
import srez_input
import srez_model
import srez_train
import os.path
import random
import numpy as np
import numpy.random
import tensorflow as tf

FLAGS = tf.app.flags.FLAGS

# Configuration (alphabetically)
tf.app.flags.DEFINE_integer('batch_size', 16,
                            "Number of samples per batch.")

tf.app.flags.DEFINE_string('checkpoint_dir', 'checkpoint',
                           "Output folder where checkpoints are dumped.")

tf.app.flags.DEFINE_integer('checkpoint_period', 10000,
                            "Number of batches between checkpoints.")

tf.app.flags.DEFINE_string('dataset', 'dataset',
                           "Path to the dataset directory.")

tf.app.flags.DEFINE_float('epsilon', 1e-8,
                          "Fuzz term to avoid numerical instability.")

tf.app.flags.DEFINE_float('gene_l1_factor', .90,
                          "Multiplier for generator L1 loss term.")

tf.app.flags.DEFINE_float('lambda_', 10.,
                          "Gradient penalty lambda hyperparameter (wgangp loss).")

tf.app.flags.DEFINE_float('learning_beta1', 0.5,
                          "Beta1 parameter used for AdamOptimizer.")

tf.app.flags.DEFINE_integer('learning_rate_half_life', 5000,
                            "Number of batches until learning rate is halved.")

tf.app.flags.DEFINE_float('learning_rate_start', 0.00020,
                          "Starting learning rate for the optimizer.")

tf.app.flags.DEFINE_bool('log_device_placement', False,
                         "Log the device where variables are placed.")

tf.app.flags.DEFINE_string('loss', 'gan',
                           "Which loss to use. [gan|wgan|wgangp]")

tf.app.flags.DEFINE_string('optimizer', 'rmsprop',
                           "Which optimizer to use. [rmsprop|adam]")

tf.app.flags.DEFINE_integer('random_seed', 0,
                            "Seed used to initialize rng.")

tf.app.flags.DEFINE_string('run', 'demo',
                           "Which operation to run. [demo|train]")

tf.app.flags.DEFINE_integer('sample_size', 64,
                            "Image sample size in pixels. Range [64,128].")

tf.app.flags.DEFINE_string('summary_dir', 'summary',
                           "Directory where TensorBoard summaries are saved.")

tf.app.flags.DEFINE_integer('summary_period', 200,
                            "Number of batches between summary data dumps.")

tf.app.flags.DEFINE_integer('test_vectors', 16,
                            "Number of features to use for testing.")

tf.app.flags.DEFINE_string('train_dir', 'train',
                           "Output folder where training logs are dumped.")

tf.app.flags.DEFINE_float('train_noise', 0.03,
                          "Level of Gaussian noise added to training images.")

tf.app.flags.DEFINE_integer('train_time', 200,
                            "Time in minutes to train the model.")
def prepare_dirs(delete_train_dir=False):
# Create checkpoint dir (do not delete anything)
if not tf.gfile.Exists(FLAGS.checkpoint_dir):
tf.gfile.MakeDirs(FLAGS.checkpoint_dir)
# Create summary dir
if not tf.gfile.Exists(FLAGS.summary_dir):
tf.gfile.MakeDirs(FLAGS.summary_dir)
# Cleanup train dir
if delete_train_dir:
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
# Return names of training files
if not tf.gfile.Exists(FLAGS.dataset) or \
not tf.gfile.IsDirectory(FLAGS.dataset):
raise FileNotFoundError("Could not find folder `%s'" % (FLAGS.dataset,))
filenames = tf.gfile.ListDirectory(FLAGS.dataset)
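    # Sort before shuffling: ListDirectory order is not guaranteed, and
    # shuffling a sorted list with the seeded `random` module (see
    # setup_tensorflow) presumably makes the train/test split reproducible.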
filenames = sorted(filenames)
random.shuffle(filenames)
filenames = [os.path.join(FLAGS.dataset, f) for f in filenames]
return filenames

def setup_tensorflow():
# Create session
config = tf.ConfigProto(log_device_placement=FLAGS.log_device_placement)
sess = tf.Session(config=config)
# Initialize rng with a deterministic seed
with sess.graph.as_default():
tf.set_random_seed(FLAGS.random_seed)
random.seed(FLAGS.random_seed)
np.random.seed(FLAGS.random_seed)
summary_writer = tf.summary.FileWriter(FLAGS.summary_dir, sess.graph)
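    # The summaries written here can be inspected with:
    #   tensorboard --logdir summary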
return sess, summary_writer

def _demo():
# Load checkpoint
if not tf.gfile.IsDirectory(FLAGS.checkpoint_dir):
raise FileNotFoundError("Could not find folder `%s'" % (FLAGS.checkpoint_dir,))
# Setup global tensorflow state
sess, summary_writer = setup_tensorflow()
# Prepare directories
filenames = prepare_dirs(delete_train_dir=False)
# Setup async input queues
features, labels = srez_input.setup_inputs(sess, filenames)
# Create and initialize model
[gene_loss, disc_loss,
disc_real_loss, disc_fake_loss,
gene_minput, gene_moutput,
gene_output, gene_var_list,
disc_real_output, disc_fake_output,
disc_var_list] = srez_model.create_model(sess, features, labels)
# Restore variables from checkpoint
saver = tf.train.Saver()
filename = 'checkpoint_new.txt'
filename = os.path.join(FLAGS.checkpoint_dir, filename)
saver.restore(sess, filename)
# Execute demo
srez_demo.demo1(sess)

class TrainData(object):
    """Wraps a dict (typically locals()) for attribute-style access."""
    def __init__(self, dictionary):
        self.__dict__.update(dictionary)
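# For example, TrainData({'gene_loss': gene_loss}).gene_loss returns the
# value. _train() below uses this to hand its entire locals() to
# srez_train.train_model() as a single object.
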
def _train():
# Setup global tensorflow state
sess, summary_writer = setup_tensorflow()
# Prepare directories
all_filenames = prepare_dirs(delete_train_dir=True)
# Separate training and test sets
train_filenames = all_filenames[:-FLAGS.test_vectors]
test_filenames = all_filenames[-FLAGS.test_vectors:]
# TBD: Maybe download dataset here
# Setup async input queues
train_features, train_labels = srez_input.setup_inputs(sess, train_filenames)
test_features, test_labels = srez_input.setup_inputs(sess, test_filenames)
# Add some noise during training (think denoising autoencoders)
noise_level = FLAGS.train_noise
noisy_train_features = train_features + \
tf.random_normal(train_features.get_shape(), stddev=noise_level)
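    # Note that tf.random_normal is re-evaluated on every sess.run, so each
    # batch is perturbed with fresh noise.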
# Create and initialize model
[gene_loss, disc_loss,
disc_real_loss, disc_fake_loss,
gene_minput, gene_moutput,
gene_output, gene_var_list,
disc_real_output, disc_fake_output,
disc_var_list] = srez_model.create_model(
sess, train_features, train_labels)
(global_step, learning_rate, gene_minimize, disc_minimize, d_clip) = \
srez_model.create_optimizers(gene_loss, gene_var_list,
disc_loss, disc_var_list)
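    # d_clip is presumably the discriminator weight-clipping op applied when
    # FLAGS.loss == 'wgan'; see srez_model.create_optimizers.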
tf.summary.scalar('generator_loss', gene_loss)
# tf.summary.scalar('discriminator_real_loss', disc_real_loss)
# tf.summary.scalar('discriminator_fake_loss', disc_fake_loss)
tf.summary.scalar('discriminator_tot_loss', disc_loss)
# Train model
train_data = TrainData(locals())
srez_train.train_model(train_data)

def main(argv=None):
    # Training or showing off?
    if FLAGS.run == 'demo':
        _demo()
    elif FLAGS.run == 'train':
        _train()
    else:
        raise ValueError("Unknown value for --run: `%s' (expected demo or train)"
                         % (FLAGS.run,))


if __name__ == '__main__':
    tf.app.run()