From 82baf1f89c2c9aafef0cea84cbcd4b38e8b40367 Mon Sep 17 00:00:00 2001 From: wazeer zulfikar Date: Fri, 7 Jun 2019 13:34:37 -0400 Subject: [PATCH 1/9] Added dcgan model architecture --- nobrainer/models/__init__.py | 3 +- nobrainer/models/dcgan.py | 90 ++++++++++++++++++++++++++++++++++++ 2 files changed, 92 insertions(+), 1 deletion(-) create mode 100644 nobrainer/models/dcgan.py diff --git a/nobrainer/models/__init__.py b/nobrainer/models/__init__.py index ce9f9e10..edc26846 100644 --- a/nobrainer/models/__init__.py +++ b/nobrainer/models/__init__.py @@ -3,7 +3,7 @@ from .meshnet import meshnet from .progressivegan import progressivegan from .unet import unet - +from .dcgan import dcgan def get(name): """Return callable that creates a particular `tf.keras.Model`. @@ -25,6 +25,7 @@ def get(name): "unet": unet, "autoencoder": autoencoder, "progressivegan": progressivegan, + "dcgan": dcgan } try: diff --git a/nobrainer/models/dcgan.py b/nobrainer/models/dcgan.py new file mode 100644 index 00000000..d6686cd3 --- /dev/null +++ b/nobrainer/models/dcgan.py @@ -0,0 +1,90 @@ +"""Model definition for DCGAN. +""" +import math + +import tensorflow as tf +from tensorflow.keras import layers, models + +def dcgan(output_shape, z_dim=256, n_base_filters=16, batchnorm=True, batch_size=None, name='dcgan'): + """Instantiate DCGAN Architecture. + + Parameters + ---------- + output_shape: list or tuple of four ints, the shape of the output images. Omit + the batch dimension, and include the number of channels. Currently, only squares and cubes supported. + z_dim: int, the dimensions of the encoding of the latent code. This would translate + to a latent code of dimensions encoding_dimx1. + n_base_filters: int, number of base filters the models first convolutional layer. The subsequent layers + have n_filters which are multiples of n_base_filters. + batchnorm: bool, whether to use batch normalization in the network. + batch_size: int, number of samples in each batch. This must be set when + training on TPUs. + name: str, name to give to the resulting model object. + + Returns + ------- + Generator Model object. + Discriminator Model object. 
+ """ + + conv_kwds = { + 'kernel_size': 4, + 'activation': None, + 'padding': 'same', + 'strides': 2 + } + + conv_transpose_kwds = { + 'kernel_size': 4, + 'strides': 2, + 'activation': None, + 'padding': 'same', + } + + dimensions = output_shape[:-1] + n_dims = len(dimensions) + + if not (n_dims in [2,3] and dimensions[1:]==dimensions[:-1]): + raise ValueError('Dimensions should be of square or cube!') + + Conv = getattr(layers, 'Conv{}D'.format(n_dims)) + ConvTranspose = getattr(layers, 'Conv{}DTranspose'.format(n_dims)) + n_layers = int(math.log(dimensions[0], 2)) + + # Generator + z_input = layers.Input(shape=(z_dim,), batch_size=batch_size) + + project = layers.Dense(pow(4, n_dims)*z_dim)(z_input) + project = layers.ReLU()(project) + project = layers.Reshape((4,)*n_dims+(z_dim,))(project) + x = project + + for i in range(n_layers-2)[::-1]: + n_filters = min(n_base_filters*(2**(i)), z_dim) + + x = ConvTranspose(n_filters, **conv_transpose_kwds)(x) + if batchnorm: + x = layers.BatchNormalization()(x) + x = layers.LeakyReLU()(x) + + outputs = Conv(1, 3, activation='sigmoid', padding='same')(x) + + generator = models.Model(inputs=[z_input], outputs=[outputs], name=name+'_generator') + + # PatchGAN Discriminator + inputs = layers.Input(shape=(output_shape), batch_size=batch_size) + x = inputs + + for i in range(n_layers-3): + n_filters = min(n_base_filters*(2**(i)), z_dim) + + x = Conv(n_filters, **conv_kwds)(x) + if batchnorm: + x = layers.BatchNormalization()(x) + x = layers.ReLU()(x) + + pred = Conv(1, 3, padding='same', activation='sigmoid')(x) + + discriminator = models.Model(inputs=[inputs], outputs=[pred], name=name+'_discriminator') + + return generator, discriminator \ No newline at end of file From d1a22c42ce330d7f317d196a2651d94bd0e7580a Mon Sep 17 00:00:00 2001 From: wazeer zulfikar Date: Mon, 10 Jun 2019 10:13:47 -0400 Subject: [PATCH 2/9] Added tests for dcgan --- nobrainer/models/tests/models_test.py | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/nobrainer/models/tests/models_test.py b/nobrainer/models/tests/models_test.py index 266a0b10..5df7894c 100644 --- a/nobrainer/models/tests/models_test.py +++ b/nobrainer/models/tests/models_test.py @@ -5,9 +5,9 @@ from ..autoencoder import autoencoder from ..highresnet import highresnet from ..meshnet import meshnet -from ..progressivegan import progressivegan from ..unet import unet - +from ..progressivegan import progressivegan +from ..dcgan import dcgan def model_test(model_cls, n_classes, input_shape, kwds={}): """Tests for models.""" @@ -72,7 +72,6 @@ def test_autoencoder(): actual_output = model.predict(x) assert actual_output.shape == x.shape - def test_progressivegan(): """Test for both discriminator and generator of progressive gan""" @@ -107,3 +106,21 @@ def test_progressivegan(): assert fake_pred.shape == (real_image_input.shape[0],) assert real_labels_pred.shape == (real_image_input.shape[0], label_size) assert fake_labels_pred.shape == (real_image_input.shape[0], label_size) + +def test_dcgan(): + """Special test for dcgan.""" + + output_shape = (1,32,32,32,1) + z_dim = 32 + z = np.random.random((1,z_dim)) + + pred_shape = (1,8,8,8,1) + + generator, discriminator = dcgan(output_shape[1:], z_dim=z_dim) + generator.compile(tf.train.AdamOptimizer(), 'mse') + discriminator.compile(tf.train.AdamOptimizer(), 'mse') + + fake_images = generator.predict(z) + fake_pred = discriminator.predict(fake_images) + + assert fake_images.shape == output_shape and fake_pred.shape == pred_shape \ 
No newline at end of file From 73338bbf385e83fea7f897ee8c5bb5ecea821e8b Mon Sep 17 00:00:00 2001 From: wazeer zulfikar Date: Mon, 10 Jun 2019 10:19:11 -0400 Subject: [PATCH 3/9] Minor documentation fixes --- nobrainer/models/dcgan.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/nobrainer/models/dcgan.py b/nobrainer/models/dcgan.py index d6686cd3..1db0ee03 100644 --- a/nobrainer/models/dcgan.py +++ b/nobrainer/models/dcgan.py @@ -10,7 +10,7 @@ def dcgan(output_shape, z_dim=256, n_base_filters=16, batchnorm=True, batch_size Parameters ---------- - output_shape: list or tuple of four ints, the shape of the output images. Omit + output_shape: list or tuple of four ints, the shape of the output images. Should be scaled to [0,1]. Omit the batch dimension, and include the number of channels. Currently, only squares and cubes supported. z_dim: int, the dimensions of the encoding of the latent code. This would translate to a latent code of dimensions encoding_dimx1. @@ -71,10 +71,9 @@ def dcgan(output_shape, z_dim=256, n_base_filters=16, batchnorm=True, batch_size generator = models.Model(inputs=[z_input], outputs=[outputs], name=name+'_generator') - # PatchGAN Discriminator + # PatchGAN Discriminator with output of 8x8(x8) inputs = layers.Input(shape=(output_shape), batch_size=batch_size) x = inputs - for i in range(n_layers-3): n_filters = min(n_base_filters*(2**(i)), z_dim) From ff7335cc3aceee3f0cc7ccb3095d8326a8d8b995 Mon Sep 17 00:00:00 2001 From: wazeer zulfikar Date: Sat, 19 Jun 2021 09:34:26 -0400 Subject: [PATCH 4/9] added training helper code --- nobrainer/training.py | 115 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 115 insertions(+) diff --git a/nobrainer/training.py b/nobrainer/training.py index addcd4f2..5c3802d8 100644 --- a/nobrainer/training.py +++ b/nobrainer/training.py @@ -150,3 +150,118 @@ def save_weights(self, filepath, **kwargs): self.generator.save( os.path.join(filepath, "generator_res_{}".format(self.resolution)) ) + + + +class GANTrainer(tf.keras.Model): + """Generative Adversarial Network Trainer. + + Trains discriminator and generator alternatively in an adversarial manner for generation of + brain MRI images. + + Parameters + ---------- + discriminator : tf.keras.Model, Instantiated using nobrainer.models + generator : tf.keras.Model, Instantiated using nobrainer.models + gradient_penalty : boolean, Use gradient penalty on discriminator for smooth training. 
+ + References + ---------- + + Links + ----- + """ + + def __init__(self, discriminator, generator, gradient_penalty=False): + super(GANTrainer, self).__init__() + self.discriminator = discriminator + self.generator = generator + self.gradient_penalty = gradient_penalty + self.latent_size = generator.latent_size + + def compile(self, d_optimizer, g_optimizer, g_loss_fn, d_loss_fn): + super(GANTrainer, self).compile() + self.d_optimizer = d_optimizer + self.g_optimizer = g_optimizer + + self.g_loss_fn = compile_utils.LossesContainer(g_loss_fn) + self.d_loss_fn = compile_utils.LossesContainer(d_loss_fn) + + if self.gradient_penalty: + self.gradient_penalty_fn = compile_utils.LossesContainer(gradient_penalty) + + def train_step(self, reals): + if isinstance(reals, tuple): + reals = reals[0] + + # get batch size dynamically + batch_size = tf.shape(reals)[0] + + # normalize the real images using minmax to [-1, 1] + reals = _adjust_dynamic_range(reals, [0.0, 255.0], [-1.0, 1.0]) + + # train discriminator + latents = tf.random.normal((batch_size, self.latent_size)) + fake_labels = tf.ones((batch_size, 1)) * -1 + real_labels = tf.ones((batch_size, 1)) + + with tf.GradientTape() as tape: + fakes = self.generator(latents) + fakes_pred, labels_pred_fake = self.discriminator(fakes) + reals_pred, labels_pred_real = self.discriminator(reals) + + fake_loss = self.d_loss_fn(fake_labels, fakes_pred) + real_loss = self.d_loss_fn(real_labels, reals_pred) + d_loss = 0.5 * (fake_loss + real_loss) + + # calculate and add the gradient penalty loss using average samples for discriminator + if self.gradient_penalty: + weight_shape = (tf.shape(reals)[0],) + ( + 1, + 1, + 1, + 1, + ) # broadcasting to right shape + weight = tf.random.uniform(weight_shape, minval=0, maxval=1) + average_samples = (weight * reals) + ((1 - weight) * fakes) + average_pred = self.discriminator(average_samples) + gradients = tf.gradients(average_pred, average_samples)[0] + gp_loss = self.gradient_penalty_fn(gradients, reals_pred) + d_loss += gp_loss + + d_gradients = tape.gradient(d_loss, self.discriminator.trainable_variables) + self.d_optimizer.apply_gradients( + zip(d_gradients, self.discriminator.trainable_variables) + ) + + # train generator + misleading_labels = tf.ones((batch_size, 1)) + + latents = tf.random.normal((batch_size, self.latent_size)) + with tf.GradientTape() as tape: + fakes = self.generator(latents) + fakes_pred, labels_pred = self.discriminator(fakes) + + g_loss = self.g_loss_fn(misleading_labels, fakes_pred) + + g_gradients = tape.gradient(g_loss, self.generator.trainable_variables) + self.g_optimizer.apply_gradients( + zip(g_gradients, self.generator.trainable_variables) + ) + + return {"d_loss": d_loss, "g_loss": g_loss} + + + def save_weights(self, filepath, **kwargs): + """ + Override base class function to save the weights of the constituent models + """ + self.generator.save_weights( + os.path.join(filepath, "g_weights_res_{}.h5".format(self.resolution)), + **kwargs + ) + self.discriminator.save_weights( + os.path.join(filepath, "d_weights_res_{}.h5".format(self.resolution)), + **kwargs + ) + From 77dc9302f365ea3ae873d2680bd4bcc7e39e8b09 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Sat, 19 Jun 2021 13:35:28 +0000 Subject: [PATCH 5/9] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- guide/train_generation_progressive.ipynb | 2 +- nobrainer/models/__init__.py | 5 +- 
nobrainer/models/dcgan.py | 63 +++++++++++++----------- nobrainer/models/tests/models_test.py | 19 ++++--- nobrainer/training.py | 5 +- 5 files changed, 51 insertions(+), 43 deletions(-) diff --git a/guide/train_generation_progressive.ipynb b/guide/train_generation_progressive.ipynb index 9a4b8663..4096a928 100644 --- a/guide/train_generation_progressive.ipynb +++ b/guide/train_generation_progressive.ipynb @@ -332,4 +332,4 @@ "outputs": [] } ] -} \ No newline at end of file +} diff --git a/nobrainer/models/__init__.py b/nobrainer/models/__init__.py index edc26846..06d23863 100644 --- a/nobrainer/models/__init__.py +++ b/nobrainer/models/__init__.py @@ -1,9 +1,10 @@ from .autoencoder import autoencoder +from .dcgan import dcgan from .highresnet import highresnet from .meshnet import meshnet from .progressivegan import progressivegan from .unet import unet -from .dcgan import dcgan + def get(name): """Return callable that creates a particular `tf.keras.Model`. @@ -25,7 +26,7 @@ def get(name): "unet": unet, "autoencoder": autoencoder, "progressivegan": progressivegan, - "dcgan": dcgan + "dcgan": dcgan, } try: diff --git a/nobrainer/models/dcgan.py b/nobrainer/models/dcgan.py index 1db0ee03..2855f6fd 100644 --- a/nobrainer/models/dcgan.py +++ b/nobrainer/models/dcgan.py @@ -5,7 +5,15 @@ import tensorflow as tf from tensorflow.keras import layers, models -def dcgan(output_shape, z_dim=256, n_base_filters=16, batchnorm=True, batch_size=None, name='dcgan'): + +def dcgan( + output_shape, + z_dim=256, + n_base_filters=16, + batchnorm=True, + batch_size=None, + name="dcgan", +): """Instantiate DCGAN Architecture. Parameters @@ -27,63 +35,62 @@ def dcgan(output_shape, z_dim=256, n_base_filters=16, batchnorm=True, batch_size Discriminator Model object. """ - conv_kwds = { - 'kernel_size': 4, - 'activation': None, - 'padding': 'same', - 'strides': 2 - } + conv_kwds = {"kernel_size": 4, "activation": None, "padding": "same", "strides": 2} conv_transpose_kwds = { - 'kernel_size': 4, - 'strides': 2, - 'activation': None, - 'padding': 'same', + "kernel_size": 4, + "strides": 2, + "activation": None, + "padding": "same", } dimensions = output_shape[:-1] n_dims = len(dimensions) - if not (n_dims in [2,3] and dimensions[1:]==dimensions[:-1]): - raise ValueError('Dimensions should be of square or cube!') + if not (n_dims in [2, 3] and dimensions[1:] == dimensions[:-1]): + raise ValueError("Dimensions should be of square or cube!") - Conv = getattr(layers, 'Conv{}D'.format(n_dims)) - ConvTranspose = getattr(layers, 'Conv{}DTranspose'.format(n_dims)) + Conv = getattr(layers, "Conv{}D".format(n_dims)) + ConvTranspose = getattr(layers, "Conv{}DTranspose".format(n_dims)) n_layers = int(math.log(dimensions[0], 2)) - # Generator + # Generator z_input = layers.Input(shape=(z_dim,), batch_size=batch_size) - project = layers.Dense(pow(4, n_dims)*z_dim)(z_input) + project = layers.Dense(pow(4, n_dims) * z_dim)(z_input) project = layers.ReLU()(project) - project = layers.Reshape((4,)*n_dims+(z_dim,))(project) + project = layers.Reshape((4,) * n_dims + (z_dim,))(project) x = project - for i in range(n_layers-2)[::-1]: - n_filters = min(n_base_filters*(2**(i)), z_dim) + for i in range(n_layers - 2)[::-1]: + n_filters = min(n_base_filters * (2 ** (i)), z_dim) x = ConvTranspose(n_filters, **conv_transpose_kwds)(x) if batchnorm: x = layers.BatchNormalization()(x) x = layers.LeakyReLU()(x) - outputs = Conv(1, 3, activation='sigmoid', padding='same')(x) + outputs = Conv(1, 3, activation="sigmoid", padding="same")(x) - generator 
= models.Model(inputs=[z_input], outputs=[outputs], name=name+'_generator') + generator = models.Model( + inputs=[z_input], outputs=[outputs], name=name + "_generator" + ) # PatchGAN Discriminator with output of 8x8(x8) - inputs = layers.Input(shape=(output_shape), batch_size=batch_size) + inputs = layers.Input(shape=(output_shape), batch_size=batch_size) x = inputs - for i in range(n_layers-3): - n_filters = min(n_base_filters*(2**(i)), z_dim) + for i in range(n_layers - 3): + n_filters = min(n_base_filters * (2 ** (i)), z_dim) x = Conv(n_filters, **conv_kwds)(x) if batchnorm: x = layers.BatchNormalization()(x) x = layers.ReLU()(x) - pred = Conv(1, 3, padding='same', activation='sigmoid')(x) + pred = Conv(1, 3, padding="same", activation="sigmoid")(x) - discriminator = models.Model(inputs=[inputs], outputs=[pred], name=name+'_discriminator') + discriminator = models.Model( + inputs=[inputs], outputs=[pred], name=name + "_discriminator" + ) - return generator, discriminator \ No newline at end of file + return generator, discriminator diff --git a/nobrainer/models/tests/models_test.py b/nobrainer/models/tests/models_test.py index 5df7894c..4bddf900 100644 --- a/nobrainer/models/tests/models_test.py +++ b/nobrainer/models/tests/models_test.py @@ -3,11 +3,12 @@ import tensorflow as tf from ..autoencoder import autoencoder +from ..dcgan import dcgan from ..highresnet import highresnet from ..meshnet import meshnet -from ..unet import unet from ..progressivegan import progressivegan -from ..dcgan import dcgan +from ..unet import unet + def model_test(model_cls, n_classes, input_shape, kwds={}): """Tests for models.""" @@ -72,6 +73,7 @@ def test_autoencoder(): actual_output = model.predict(x) assert actual_output.shape == x.shape + def test_progressivegan(): """Test for both discriminator and generator of progressive gan""" @@ -107,20 +109,21 @@ def test_progressivegan(): assert real_labels_pred.shape == (real_image_input.shape[0], label_size) assert fake_labels_pred.shape == (real_image_input.shape[0], label_size) + def test_dcgan(): """Special test for dcgan.""" - output_shape = (1,32,32,32,1) + output_shape = (1, 32, 32, 32, 1) z_dim = 32 - z = np.random.random((1,z_dim)) + z = np.random.random((1, z_dim)) - pred_shape = (1,8,8,8,1) + pred_shape = (1, 8, 8, 8, 1) generator, discriminator = dcgan(output_shape[1:], z_dim=z_dim) - generator.compile(tf.train.AdamOptimizer(), 'mse') - discriminator.compile(tf.train.AdamOptimizer(), 'mse') + generator.compile(tf.train.AdamOptimizer(), "mse") + discriminator.compile(tf.train.AdamOptimizer(), "mse") fake_images = generator.predict(z) fake_pred = discriminator.predict(fake_images) - assert fake_images.shape == output_shape and fake_pred.shape == pred_shape \ No newline at end of file + assert fake_images.shape == output_shape and fake_pred.shape == pred_shape diff --git a/nobrainer/training.py b/nobrainer/training.py index 5c3802d8..9e117804 100644 --- a/nobrainer/training.py +++ b/nobrainer/training.py @@ -152,7 +152,6 @@ def save_weights(self, filepath, **kwargs): ) - class GANTrainer(tf.keras.Model): """Generative Adversarial Network Trainer. 
@@ -167,7 +166,7 @@ class GANTrainer(tf.keras.Model): References ---------- - + Links ----- """ @@ -251,7 +250,6 @@ def train_step(self, reals): return {"d_loss": d_loss, "g_loss": g_loss} - def save_weights(self, filepath, **kwargs): """ Override base class function to save the weights of the constituent models @@ -264,4 +262,3 @@ def save_weights(self, filepath, **kwargs): os.path.join(filepath, "d_weights_res_{}.h5".format(self.resolution)), **kwargs ) - From dac7ab5e2dc2ee4ba73ffa7fe78da53649326b1e Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 11 Oct 2021 18:14:31 +0000 Subject: [PATCH 6/9] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- nobrainer/models/tests/models_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nobrainer/models/tests/models_test.py b/nobrainer/models/tests/models_test.py index 09adddd9..c7a94094 100644 --- a/nobrainer/models/tests/models_test.py +++ b/nobrainer/models/tests/models_test.py @@ -3,9 +3,9 @@ import tensorflow as tf from ..autoencoder import autoencoder -from ..dcgan import dcgan from ..bayesian_vnet import bayesian_vnet from ..bayesian_vnet_semi import bayesian_vnet_semi +from ..dcgan import dcgan from ..highresnet import highresnet from ..meshnet import meshnet from ..progressivegan import progressivegan From 81d676851e3266bcdd11afc634a8ad037a02d57c Mon Sep 17 00:00:00 2001 From: Satrajit Ghosh Date: Mon, 11 Oct 2021 14:40:18 -0400 Subject: [PATCH 7/9] fix: flake issues --- nobrainer/models/dcgan.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/nobrainer/models/dcgan.py b/nobrainer/models/dcgan.py index 2855f6fd..bfc23750 100644 --- a/nobrainer/models/dcgan.py +++ b/nobrainer/models/dcgan.py @@ -2,7 +2,6 @@ """ import math -import tensorflow as tf from tensorflow.keras import layers, models @@ -18,12 +17,13 @@ def dcgan( Parameters ---------- - output_shape: list or tuple of four ints, the shape of the output images. Should be scaled to [0,1]. Omit - the batch dimension, and include the number of channels. Currently, only squares and cubes supported. + output_shape: list or tuple of four ints, the shape of the output images. Should be + scaled to [0,1]. Omit the batch dimension, and include the number of channels. + Currently, only squares and cubes supported. z_dim: int, the dimensions of the encoding of the latent code. This would translate to a latent code of dimensions encoding_dimx1. - n_base_filters: int, number of base filters the models first convolutional layer. The subsequent layers - have n_filters which are multiples of n_base_filters. + n_base_filters: int, number of base filters the models first convolutional layer. + The subsequent layers have n_filters which are multiples of n_base_filters. batchnorm: bool, whether to use batch normalization in the network. batch_size: int, number of samples in each batch. This must be set when training on TPUs. 
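With the docstring cleanup above, the dcgan contract is settled: the factory returns a (generator, discriminator) pair and is registered under "dcgan" in nobrainer.models. Below is a minimal usage sketch assuming the shapes exercised by the shipped test (a 32^3 single-channel volume with z_dim=32); the random input is illustrative only:

    import numpy as np

    from nobrainer.models import get

    # The registry lookup returns the dcgan callable added in this series.
    generator, discriminator = get("dcgan")((32, 32, 32, 1), z_dim=32)

    # One latent code in, one fake volume out; the sigmoid output lies in [0, 1].
    z = np.random.random((1, 32)).astype("float32")
    fake = generator.predict(z)               # shape (1, 32, 32, 32, 1)
    patch_pred = discriminator.predict(fake)  # shape (1, 8, 8, 8, 1), PatchGAN map
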
From 227f14b420f6b1120d346d85e8800493c1f29c75 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 11 Oct 2021 18:44:12 +0000 Subject: [PATCH 8/9] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- nobrainer/models/dcgan.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nobrainer/models/dcgan.py b/nobrainer/models/dcgan.py index bfc23750..2b5f3698 100644 --- a/nobrainer/models/dcgan.py +++ b/nobrainer/models/dcgan.py @@ -17,12 +17,12 @@ def dcgan( Parameters ---------- - output_shape: list or tuple of four ints, the shape of the output images. Should be - scaled to [0,1]. Omit the batch dimension, and include the number of channels. + output_shape: list or tuple of four ints, the shape of the output images. Should be + scaled to [0,1]. Omit the batch dimension, and include the number of channels. Currently, only squares and cubes supported. z_dim: int, the dimensions of the encoding of the latent code. This would translate to a latent code of dimensions encoding_dimx1. - n_base_filters: int, number of base filters the models first convolutional layer. + n_base_filters: int, number of base filters the models first convolutional layer. The subsequent layers have n_filters which are multiples of n_base_filters. batchnorm: bool, whether to use batch normalization in the network. batch_size: int, number of samples in each batch. This must be set when From 10e4f29a3ca82a047ef28183a80de1e5eb5dcd87 Mon Sep 17 00:00:00 2001 From: Satrajit Ghosh Date: Mon, 11 Oct 2021 14:59:17 -0400 Subject: [PATCH 9/9] fix: adam in tf2 --- nobrainer/models/tests/models_test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nobrainer/models/tests/models_test.py b/nobrainer/models/tests/models_test.py index c7a94094..a7de8c54 100644 --- a/nobrainer/models/tests/models_test.py +++ b/nobrainer/models/tests/models_test.py @@ -123,8 +123,8 @@ def test_dcgan(): pred_shape = (1, 8, 8, 8, 1) generator, discriminator = dcgan(output_shape[1:], z_dim=z_dim) - generator.compile(tf.train.AdamOptimizer(), "mse") - discriminator.compile(tf.train.AdamOptimizer(), "mse") + generator.compile(tf.optimizers.Adam(), "mse") + discriminator.compile(tf.optimizers.Adam(), "mse") fake_images = generator.predict(z) fake_pred = discriminator.predict(fake_images)
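
To show how the pieces fit together, here is a sketch of one alternating adversarial update for the dcgan pair. It mirrors the logic of the GANTrainer added in this series, but is adapted to dcgan's single-output discriminator (GANTrainer as written reads generator.latent_size and unpacks two discriminator outputs, which matches the progressivegan models rather than this pair). The optimizer settings, the use of binary cross-entropy in place of the configurable loss containers, and the train_step_once name are all illustrative assumptions, not part of the patches:

    import tensorflow as tf

    from nobrainer.models.dcgan import dcgan

    generator, discriminator = dcgan((32, 32, 32, 1), z_dim=32)
    d_opt = tf.optimizers.Adam(1e-4)
    g_opt = tf.optimizers.Adam(1e-4)
    bce = tf.keras.losses.BinaryCrossentropy()  # discriminator already ends in a sigmoid

    @tf.function
    def train_step_once(reals):
        batch = tf.shape(reals)[0]

        # Discriminator update: push real patch predictions toward 1, fakes toward 0.
        z = tf.random.normal((batch, 32))
        with tf.GradientTape() as tape:
            fakes = generator(z, training=True)
            real_pred = discriminator(reals, training=True)
            fake_pred = discriminator(fakes, training=True)
            d_loss = 0.5 * (bce(tf.ones_like(real_pred), real_pred)
                            + bce(tf.zeros_like(fake_pred), fake_pred))
        grads = tape.gradient(d_loss, discriminator.trainable_variables)
        d_opt.apply_gradients(zip(grads, discriminator.trainable_variables))

        # Generator update: reward fakes that the discriminator labels as real.
        z = tf.random.normal((batch, 32))
        with tf.GradientTape() as tape:
            fake_pred = discriminator(generator(z, training=True), training=True)
            g_loss = bce(tf.ones_like(fake_pred), fake_pred)
        grads = tape.gradient(g_loss, generator.trainable_variables)
        g_opt.apply_gradients(zip(grads, generator.trainable_variables))

        return {"d_loss": d_loss, "g_loss": g_loss}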