diff --git a/gan_01.py b/gan_01.py
index 91d5902829126b166da53543695fba93a4a0ebe1..c61a4f13426d4febb909c0e8eb7058216a299fb6 100644
--- a/gan_01.py
+++ b/gan_01.py
@@ -1,3 +1,5 @@
+# A simple GAN
+
 import matplotlib.pyplot as plt
 import numpy as np
 
diff --git a/gan_02.py b/gan_02.py
new file mode 100644
index 0000000000000000000000000000000000000000..fc4e247cb366eede16e601253368204cb41b6fb2
--- /dev/null
+++ b/gan_02.py
@@ -0,0 +1,123 @@
+# A more advanced GAN built with convolutional layers (DCGAN-style)
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+from keras.layers import Dense, Flatten, Reshape, Activation, BatchNormalization
+from keras.layers.advanced_activations import LeakyReLU
+from keras.layers.convolutional import Conv2D, Conv2DTranspose
+from keras.models import Sequential
+from keras.datasets import fashion_mnist
+from keras.optimizers import Adam
+
+z_dim = 100
+
+img_lines = 28
+img_columns = 28
+img_channels = 1
+
+img_shape = (img_lines, img_columns, img_channels)
+
+
+def build_generator_conv(z_dim):
+    model = Sequential()
+    # Start from a 7x7x256 tensor; the two stride-2 transposed convolutions
+    # below upsample 7 -> 14 -> 28 to reach img_lines x img_columns.
+    model.add(Dense(7*7*256, input_dim=z_dim))
+    model.add(Reshape((7, 7, 256)))
+    model.add(Conv2DTranspose(128, kernel_size=3, strides=2, padding='same'))
+    model.add(BatchNormalization())
+    model.add(LeakyReLU(alpha=0.01))
+    model.add(Conv2DTranspose(64, kernel_size=3, strides=1, padding='same'))
+    model.add(BatchNormalization())
+    model.add(LeakyReLU(alpha=0.01))
+    model.add(Conv2DTranspose(1, kernel_size=3, strides=2, padding='same'))
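+    # tanh keeps pixel values in [-1, 1], the range the training data is rescaled to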
+    model.add(Activation('tanh'))
+    return model
+
+
+def build_discriminator_conv(img_shape):
+    model = Sequential()
+    # Extract image features with strided convolutions (28 -> 14 -> 7 -> 4)
+    model.add(Conv2D(32, kernel_size=3, strides=2,
+                     input_shape=img_shape, padding='same'))
+    model.add(LeakyReLU(alpha=0.001))
+    model.add(Conv2D(64, kernel_size=3, strides=2, padding='same'))
+    model.add(BatchNormalization())
+    model.add(LeakyReLU(alpha=0.001))
+    model.add(Conv2D(128, kernel_size=3, strides=2, padding='same'))
+    model.add(BatchNormalization())
+    model.add(LeakyReLU(alpha=0.001))
+    # Classify the extracted features as real or fake with a conventional dense layer
+    model.add(Flatten())
+    model.add(Dense(1, activation='sigmoid'))
+    return model
+
+
+def build_gan(generator, discriminator):
+    model = Sequential()
+    model.add(generator)
+    model.add(discriminator)
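+    # Freeze the discriminator's weights within the combined model only;
+    # Keras fixes trainability at compile time, so the separately compiled
+    # discriminator still trains on its own batches.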
+    discriminator.trainable = False
+    model.compile(loss='binary_crossentropy', optimizer=Adam())
+    return model
+
+
+discriminator = build_discriminator_conv(img_shape)
+discriminator.compile(loss='binary_crossentropy',
+                      optimizer=Adam(), metrics=['accuracy'])
+
+generator = build_generator_conv(z_dim)
+gan = build_gan(generator, discriminator)
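+
+# Optional sanity check: the generator should map (batch, z_dim) noise to
+# (batch, 28, 28, 1) images, and the discriminator should return one
+# probability per image.
+z_check = np.random.normal(0, 1, (1, z_dim))
+img_check = generator.predict(z_check)
+assert img_check.shape == (1, img_lines, img_columns, img_channels)
+assert discriminator.predict(img_check).shape == (1, 1)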
+
+
+def sample_images(generator, iteration, img_per_l=4, img_per_c=4):
+    z = np.random.normal(0, 1, (img_per_l*img_per_c, z_dim))
+    img_gen = generator.predict(z)
+    # Rescale from the generator's tanh range [-1, 1] to [0, 1] for display
+    img_gen = 0.5*img_gen+0.5
+    _, ax = plt.subplots(img_per_l, img_per_c, figsize=(
+        4, 4), sharey=True, sharex=True)
+    cpt = 0
+    for i in range(img_per_l):
+        for j in range(img_per_c):
+            ax[i, j].imshow(img_gen[cpt, :, :, 0], cmap='gray')
+            ax[i, j].axis('off')
+            cpt += 1
+    plt.savefig(f"test_{iteration:05d}.png", dpi=150)
+    plt.close()
+
+
+def train(iterations, batch_size, sample_interval):
+    losses = []
+    accuracies = []
+    iteration_checkpoints = []
+    ((X_train, _), (_, _)) = fashion_mnist.load_data()
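+    # Rescale pixel values from [0, 255] to [-1, 1] to match the generator's tanh output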
+    X_train = X_train/127.5-1
+    X_train = np.expand_dims(X_train, axis=3)
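+    # Ground-truth targets: 1 for real images, 0 for generated ones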
+    real = np.ones((batch_size, 1))
+    fake = np.zeros((batch_size, 1))
+    for iteration in range(iterations):
+        idx = np.random.randint(0, X_train.shape[0], batch_size)
+        imgs = X_train[idx]
+        z = np.random.normal(0, 1, (batch_size, z_dim))
+        gen_imgs = generator.predict(z)
+        d_loss_real = discriminator.train_on_batch(imgs, real)
+        d_loss_fake = discriminator.train_on_batch(gen_imgs, fake)
+        d_loss, accuracy = 0.5*np.add(d_loss_real, d_loss_fake)
+        z = np.random.normal(0, 1, (batch_size, z_dim))
+        g_loss = gan.train_on_batch(z, real)
+        if (iteration+1) % sample_interval == 0 or iteration == 0:
+            losses.append((d_loss, g_loss))
+            accuracies.append(100*accuracy)
+            iteration_checkpoints.append(iteration+1)
+            status = 'iteration: {} [D loss: {:.4f}, acc.: {:.2%}] [G loss: {:.4f}]'.format(
+                iteration+1, d_loss, accuracy, g_loss)
+            print(status)
+            sample_images(generator, iteration+1)
+
+
+iterations = 20000
+batch_size = 128
+sample_interval = 1000
+train(iterations, batch_size, sample_interval)
diff --git a/gan_03.py b/gan_03.py
new file mode 100644
index 0000000000000000000000000000000000000000..f84fe44d5195db141dc1f75f80e137707bf5b049
--- /dev/null
+++ b/gan_03.py
@@ -0,0 +1,152 @@
+# A conditional GAN (CGAN): convolutional layers plus a class label to control which garment is generated
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+from keras.layers import Dense, Flatten, Reshape, Activation, BatchNormalization, Dropout, Input, Embedding, Multiply, Concatenate
+from keras.layers.advanced_activations import LeakyReLU
+from keras.layers.convolutional import Conv2D, Conv2DTranspose
+from keras.models import Sequential, Model
+from keras.datasets import fashion_mnist
+from keras.optimizers import Adam
+
+z_dim = 100
+
+img_lines = 28
+img_columns = 28
+img_channels = 1
+
+img_shape = (img_lines, img_columns, img_channels)
+
+nb_classes = 10
+
+
+def build_generator_conv(z_dim):
+    model = Sequential()
+    # Start from a 7x7x256 tensor; the two stride-2 transposed convolutions
+    # below upsample 7 -> 14 -> 28 to reach img_lines x img_columns.
+    model.add(Dense(7*7*256, input_dim=z_dim))
+    model.add(Reshape((7, 7, 256)))
+    model.add(Conv2DTranspose(128, kernel_size=3, strides=2, padding='same'))
+    model.add(BatchNormalization())
+    model.add(LeakyReLU(alpha=0.01))
+    model.add(Conv2DTranspose(64, kernel_size=3, strides=1, padding='same'))
+    model.add(BatchNormalization())
+    model.add(LeakyReLU(alpha=0.01))
+    model.add(Conv2DTranspose(1, kernel_size=3, strides=2, padding='same'))
+    model.add(Activation('tanh'))
+    return model
+
+
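+# The class label is embedded into a z_dim-sized vector and multiplied
+# element-wise with the noise vector, so the label steers the latent input
+# fed to the unconditional generator above.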
+def build_generator_cond(z_dim):
+    z = Input(shape=(z_dim,))
+    input_class = Input(shape=(1,), dtype='int32')
+    class_embedding = Embedding(nb_classes, z_dim, input_length=1)(input_class)
+    class_embedding = Flatten()(class_embedding)
+    embedded_class_vector = Multiply()([z, class_embedding])
+    generator = build_generator_conv(z_dim)
+    img_cond = generator(embedded_class_vector)
+    return Model([z, input_class], img_cond)
+
+
+def build_discriminator_conv(img_shape):
+    model = Sequential()
+    # Extract features from the image plus its label channel with strided convolutions (28 -> 14 -> 7 -> 4)
+    model.add(Conv2D(32, kernel_size=3, strides=2,
+                     input_shape=(img_shape[0], img_shape[1], img_shape[2]+1), padding='same'))
+    model.add(LeakyReLU(alpha=0.001))
+    model.add(Conv2D(64, kernel_size=3, strides=2, padding='same'))
+    model.add(BatchNormalization())
+    model.add(LeakyReLU(alpha=0.001))
+    model.add(Conv2D(128, kernel_size=3, strides=2, padding='same'))
+    model.add(BatchNormalization())
+    model.add(LeakyReLU(alpha=0.001))
+    # Classify the extracted features as real or fake with a conventional dense layer
+    model.add(Flatten())
+    model.add(Dense(1, activation='sigmoid'))
+    return model
+
+
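+# The class label is embedded into a 28x28x1 map and concatenated to the
+# image as an extra channel, which is why the convolutional discriminator
+# above expects img_channels+1 input channels.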
+def build_discriminator_cond(img_shape):
+    img = Input(shape=img_shape)
+    input_class = Input(shape=(1,), dtype='int32')
+    class_embedding = Embedding(nb_classes, np.prod(
+        img_shape), input_length=1)(input_class)
+    class_embedding = Flatten()(class_embedding)
+    class_embedding = Reshape(img_shape)(class_embedding)
+    embedded_class_tensor = Concatenate(axis=-1)([img, class_embedding])
+    discriminator = build_discriminator_conv(img_shape)
+    output_class = discriminator(embedded_class_tensor)
+    return Model([img, input_class], output_class)
+
+
+def build_gan_cond(generator, discriminator):
+    z = Input(shape=(z_dim,))
+    input_class = Input(shape=(1,))
+    img = generator([z, input_class])
+    output_class = discriminator([img, input_class])
+    model = Model([z, input_class], output_class)
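+    # Freeze the discriminator's weights within the combined model only;
+    # the separately compiled discriminator still trains on its own batches.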
+    discriminator.trainable = False
+    model.compile(loss='binary_crossentropy', optimizer=Adam())
+    return model
+
+
+discriminator = build_discriminator_cond(img_shape)
+discriminator.compile(loss='binary_crossentropy',
+                      optimizer=Adam(), metrics=['accuracy'])
+generator = build_generator_cond(z_dim)
+gan = build_gan_cond(generator, discriminator)
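+
+# Optional sanity check: the conditional generator takes noise plus an
+# integer class label and still produces a (batch, 28, 28, 1) image.
+z_check = np.random.normal(0, 1, (1, z_dim))
+label_check = np.array([[3]])
+img_check = generator.predict([z_check, label_check])
+assert img_check.shape == (1, img_lines, img_columns, img_channels)
+assert discriminator.predict([img_check, label_check]).shape == (1, 1)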
+
+
+def sample_images(generator, iteration, img_per_l=4, img_per_c=4):
+    z = np.random.normal(0, 1, (img_per_l*img_per_c, z_dim))
+    # Draw one random class label per image to condition the generator
+    labels = np.random.randint(
+        0, nb_classes, img_per_l*img_per_c).reshape(-1, 1)
+    img_gen = generator.predict([z, labels])
+    # Rescale from the generator's tanh range [-1, 1] to [0, 1] for display
+    img_gen = 0.5*img_gen+0.5
+    _, ax = plt.subplots(img_per_l, img_per_c, figsize=(
+        4, 4), sharey=True, sharex=True)
+    cpt = 0
+    for i in range(img_per_l):
+        for j in range(img_per_c):
+            ax[i, j].imshow(img_gen[cpt, :, :, 0], cmap='gray')
+            ax[i, j].axis('off')
+            cpt += 1
+    plt.savefig(f"test_{iteration:05d}.png", dpi=150)
+    plt.close()
+
+
+def train(iterations, batch_size, sample_interval):
+    losses = []
+    iteration_checkpoints = []
+    ((X_train, Y_train), (_, _)) = fashion_mnist.load_data()
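+    # Rescale pixel values from [0, 255] to [-1, 1] to match the generator's tanh output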
+    X_train = X_train/127.5-1
+    X_train = np.expand_dims(X_train, axis=3)
+    real = np.ones((batch_size, 1))
+    fake = np.zeros((batch_size, 1))
+    for iteration in range(iterations):
+        idx = np.random.randint(0, X_train.shape[0], batch_size)
+        imgs = X_train[idx]
+        labels = Y_train[idx]
+        z = np.random.normal(0, 1, (batch_size, z_dim))
+        gen_imgs = generator.predict([z, labels])
+        d_loss_real = discriminator.train_on_batch([imgs, labels], real)
+        d_loss_fake = discriminator.train_on_batch([gen_imgs, labels], fake)
+        d_loss, accuracy = 0.5*np.add(d_loss_real, d_loss_fake)
+        z = np.random.normal(0, 1, (batch_size, z_dim))
+        labels = np.random.randint(0, nb_classes, batch_size).reshape(-1, 1)
+        g_loss = gan.train_on_batch([z, labels], real)
+        if (iteration+1) % sample_interval == 0 or iteration == 0:
+            losses.append((d_loss, g_loss))
+            iteration_checkpoints.append(iteration+1)
+            status = 'iteration: {} [D loss: {:.4f}, acc.: {:.2%}] [G loss: {:.4f}]'.format(
+                iteration+1, d_loss, accuracy, g_loss)
+            print(status)
+            sample_images(generator, iteration+1)
+
+
+iterations = 20000
+batch_size = 128
+sample_interval = 1000
+train(iterations, batch_size, sample_interval)