pyradox-generative
State of the Art Neural Networks for Generative Deep Learning
Installation
pip install pyradox-generative
Usage
This library provides lightweight trainers for the following generative models:
Vanilla GAN
Just provide your generator and discriminator, and train your GAN.
Data Preparation:
from pyradox_generative import GAN
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
(x_train, y_train), _ = keras.datasets.mnist.load_data()
x_train = x_train.astype(np.float32) / 255
x_train = x_train.reshape(-1, 28, 28, 1) * 2.0 - 1.0
dataset = tf.data.Dataset.from_tensor_slices(x_train)
dataset = dataset.shuffle(1024)
dataset = dataset.batch(32, drop_remainder=True).prefetch(1)
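As a quick sanity check (optional), each batch from this pipeline is shaped (32, 28, 28, 1) with values in [-1, 1], matching the tanh output range of the generator defined below:

batch = next(iter(dataset))
print(batch.shape)  # (32, 28, 28, 1)
print(float(tf.reduce_min(batch)), float(tf.reduce_max(batch)))  # within [-1.0, 1.0]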
Define the generator and discriminator models:
generator = keras.models.Sequential(
    [
        keras.Input(shape=[28]),
        keras.layers.Dense(7 * 7 * 3),
        keras.layers.Reshape([7, 7, 3]),
        keras.layers.BatchNormalization(),
        keras.layers.Conv2DTranspose(
            32, kernel_size=3, strides=2, padding="same", activation="selu"
        ),
        keras.layers.Conv2DTranspose(
            1, kernel_size=3, strides=2, padding="same", activation="tanh"
        ),
    ],
    name="generator",
)
discriminator = keras.models.Sequential(
    [
        keras.layers.Conv2D(
            32,
            kernel_size=3,
            strides=2,
            padding="same",
            activation=keras.layers.LeakyReLU(0.2),
            input_shape=[28, 28, 1],
        ),
        keras.layers.Conv2D(
            3,
            kernel_size=3,
            strides=2,
            padding="same",
            activation=keras.layers.LeakyReLU(0.2),
        ),
        keras.layers.Flatten(),
        keras.layers.Dense(1, activation="sigmoid"),
    ],
    name="discriminator",
)
Plug the models into the trainer class and train them using the familiar compile and fit methods:
gan = GAN(discriminator=discriminator, generator=generator, latent_dim=28)
gan.compile(
    d_optimizer=keras.optimizers.Adam(learning_rate=0.0001),
    g_optimizer=keras.optimizers.Adam(learning_rate=0.0001),
    loss_fn=keras.losses.BinaryCrossentropy(),
)
history = gan.fit(dataset)
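After training, new images can be sampled directly from the generator defined above (a minimal sketch; the rescaling assumes the tanh output range used in this example):

noise = tf.random.normal(shape=[16, 28])       # latent_dim = 28, as configured above
fake_images = generator(noise, training=False)
fake_images = (fake_images + 1.0) / 2.0        # map tanh range [-1, 1] back to [0, 1]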
Conditional GAN
Just provide your generator and discriminator, and train your GAN.
Prepare the data and calculate the input and output dimensions of the generator and discriminator:
from pyradox_generative import ConditionalGAN
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
CODINGS_SIZE = 28
N_CHANNELS = 1
N_CLASSES = 10
G_INP_CHANNELS = CODINGS_SIZE + N_CLASSES
D_INP_CHANNELS = N_CHANNELS + N_CLASSES
(x_train, y_train), _ = keras.datasets.mnist.load_data()
x_train = x_train.astype(np.float32) / 255
x_train = x_train.reshape(-1, 28, 28, 1) * 2.0 - 1.0
y_train = keras.utils.to_categorical(y_train, 10)
dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
dataset = dataset.shuffle(1024)
dataset = dataset.batch(32, drop_remainder=True).prefetch(1)
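The dimension arithmetic above reflects the usual conditioning scheme: the one-hot label is concatenated to the latent code on the generator side, and broadcast across the image as extra channels on the discriminator side. The sketch below only illustrates the idea; the trainer performs this internally, and the exact order and mechanics are an assumption:

noise = tf.random.normal([32, CODINGS_SIZE])
labels = tf.cast(y_train[:32], tf.float32)                # one-hot labels from above
g_input = tf.concat([noise, labels], axis=1)              # shape (32, G_INP_CHANNELS)
label_maps = tf.tile(tf.reshape(labels, [32, 1, 1, N_CLASSES]), [1, 28, 28, 1])
d_input = tf.concat([x_train[:32], label_maps], axis=-1)  # shape (32, 28, 28, D_INP_CHANNELS)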
Define the generator and discriminator models:
generator = keras.models.Sequential(
    [
        keras.Input(shape=[G_INP_CHANNELS]),
        keras.layers.Dense(7 * 7 * 3),
        keras.layers.Reshape([7, 7, 3]),
        keras.layers.BatchNormalization(),
        keras.layers.Conv2DTranspose(
            32, kernel_size=3, strides=2, padding="same", activation="selu"
        ),
        keras.layers.Conv2DTranspose(
            1, kernel_size=3, strides=2, padding="same", activation="tanh"
        ),
    ],
    name="generator",
)
discriminator = keras.models.Sequential(
    [
        keras.layers.Conv2D(
            32,
            kernel_size=3,
            strides=2,
            padding="same",
            activation=keras.layers.LeakyReLU(0.2),
            input_shape=[28, 28, D_INP_CHANNELS],
        ),
        keras.layers.Conv2D(
            3,
            kernel_size=3,
            strides=2,
            padding="same",
            activation=keras.layers.LeakyReLU(0.2),
        ),
        keras.layers.Flatten(),
        keras.layers.Dense(1, activation="sigmoid"),
    ],
    name="discriminator",
)
Plug the models into the trainer class and train them using the familiar compile and fit methods:
gan = ConditionalGAN(
    discriminator=discriminator, generator=generator, latent_dim=CODINGS_SIZE
)
gan.compile(
    d_optimizer=keras.optimizers.Adam(learning_rate=0.0001),
    g_optimizer=keras.optimizers.Adam(learning_rate=0.0001),
    loss_fn=keras.losses.BinaryCrossentropy(),
)
history = gan.fit(dataset)
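Once trained, a digit of a chosen class can be sampled by pairing a latent code with the corresponding one-hot label (again a sketch; the noise-then-label concatenation order is an assumption):

noise = tf.random.normal([1, CODINGS_SIZE])
label = tf.one_hot([7], N_CLASSES)  # condition on the digit 7
fake = generator(tf.concat([noise, label], axis=1), training=False)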
Wasserstein GAN
Just provide your generator and discriminator, and train your GAN.
Data Preparation:
from pyradox_generative import WGANGP
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
(x_train, y_train), _ = keras.datasets.mnist.load_data()
x_train = x_train.astype(np.float32) / 255
x_train = x_train.reshape(-1, 28, 28, 1) * 2.0 - 1.0
dataset = tf.data.Dataset.from_tensor_slices(x_train)
dataset = dataset.shuffle(1024)
dataset = dataset.batch(32, drop_remainder=True).prefetch(1)
Define the generator and discriminator models:
generator = keras.models.Sequential(
    [
        keras.Input(shape=[28]),
        keras.layers.Dense(7 * 7 * 3),
        keras.layers.Reshape([7, 7, 3]),
        keras.layers.BatchNormalization(),
        keras.layers.Conv2DTranspose(
            32, kernel_size=3, strides=2, padding="same", activation="selu"
        ),
        keras.layers.Conv2DTranspose(
            1, kernel_size=3, strides=2, padding="same", activation="tanh"
        ),
    ],
    name="generator",
)
discriminator = keras.models.Sequential(
    [
        keras.layers.Conv2D(
            32,
            kernel_size=3,
            strides=2,
            padding="same",
            activation=keras.layers.LeakyReLU(0.2),
            input_shape=[28, 28, 1],
        ),
        keras.layers.Conv2D(
            3,
            kernel_size=3,
            strides=2,
            padding="same",
            activation=keras.layers.LeakyReLU(0.2),
        ),
        keras.layers.Flatten(),
        keras.layers.Dense(1, activation="sigmoid"),
    ],
    name="discriminator",
)
Plug the models into the trainer class and train them using the familiar compile and fit methods:
gan = WGANGP(
    discriminator=discriminator,
    generator=generator,
    latent_dim=28,
    discriminator_extra_steps=1,
    gp_weight=10,
)
gan.compile(
    d_optimizer=keras.optimizers.Adam(learning_rate=0.0001),
    g_optimizer=keras.optimizers.Adam(learning_rate=0.0001),
)
history = gan.fit(dataset)
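The gp_weight argument scales the gradient penalty from Improved Training of Wasserstein GANs. For reference, here is a minimal sketch of that penalty, illustrative of the technique rather than the library's internal code:

def gradient_penalty(discriminator, real_images, fake_images):
    # evaluate the critic's gradient norm on random interpolates between real and fake
    batch_size = tf.shape(real_images)[0]
    alpha = tf.random.uniform([batch_size, 1, 1, 1], 0.0, 1.0)
    interpolated = alpha * real_images + (1.0 - alpha) * fake_images
    with tf.GradientTape() as tape:
        tape.watch(interpolated)
        pred = discriminator(interpolated, training=True)
    grads = tape.gradient(pred, interpolated)
    norm = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]))
    return tf.reduce_mean((norm - 1.0) ** 2)  # penalize deviation from unit gradient norm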
Variational Autoencoder
Just provide your encoder and decoder, and train your VAE; sampling is done internally.
Data Preparation:
from pyradox_generative import VAE
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
(x_train, y_train), _ = keras.datasets.mnist.load_data()
x_train = x_train.astype(np.float32) / 255
x_train = x_train.reshape(-1, 28, 28, 1) * 2.0 - 1.0
dataset = tf.data.Dataset.from_tensor_slices(x_train)
dataset = dataset.shuffle(1024)
dataset = dataset.batch(32, drop_remainder=True).prefetch(1)
Define the encoder and decoder models:
encoder = keras.models.Sequential(
    [
        keras.Input(shape=(28, 28, 1)),
        keras.layers.Conv2D(32, 3, activation="relu", strides=2, padding="same"),
        keras.layers.Conv2D(64, 3, activation="relu", strides=2, padding="same"),
        keras.layers.Flatten(),
        keras.layers.Dense(16, activation="relu"),
    ],
    name="encoder",
)
decoder = keras.models.Sequential(
    [
        keras.Input(shape=(28,)),
        keras.layers.Dense(7 * 7 * 64, activation="relu"),
        keras.layers.Reshape((7, 7, 64)),
        keras.layers.Conv2DTranspose(64, 3, activation="relu", strides=2, padding="same"),
        keras.layers.Conv2DTranspose(32, 3, activation="relu", strides=2, padding="same"),
        keras.layers.Conv2DTranspose(1, 3, activation="sigmoid", padding="same"),
    ],
    name="decoder",
)
Plug the models into the trainer class and train them using the familiar compile and fit methods:
vae = VAE(encoder=encoder, decoder=decoder, latent_dim=28)
vae.compile(keras.optimizers.Adam(learning_rate=0.001))
history = vae.fit(dataset)
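The trainer handles the sampling step internally; the standard reparameterization trick it is based on looks like this (an illustrative sketch, not the library's exact code). After training, the decoder alone can generate new images from random latent codes, assuming the usual standard normal prior:

def reparameterize(z_mean, z_log_var):
    # z = mu + sigma * epsilon, with epsilon ~ N(0, I)
    eps = tf.random.normal(shape=tf.shape(z_mean))
    return z_mean + tf.exp(0.5 * z_log_var) * eps

generated = decoder(tf.random.normal(shape=[16, 28]))  # latent_dim = 28, as above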
Style GAN
Just provide your generator and discriminator models, and train your GAN.
Data Preparation:
from pyradox_generative import StyleGAN
import numpy as np
import tensorflow as tf
from functools import partial
def resize_image(res, image):
    # only downsampling, so use nearest neighbor, which is faster to run
    image = tf.image.resize(
        image, (res, res), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR
    )
    image = tf.cast(image, tf.float32) / 127.5 - 1.0
    return image
def create_dataloader(res):
    (x_train, y_train), _ = tf.keras.datasets.mnist.load_data()
    x_train = x_train[:100, :, :]
    # pad the 28x28 digits to 32x32 so the resolutions are powers of two
    x_train = np.pad(x_train, [(0, 0), (2, 2), (2, 2)], mode="constant")
    x_train = tf.image.grayscale_to_rgb(tf.expand_dims(x_train, axis=3), name=None)
    x_train = tf.data.Dataset.from_tensor_slices(x_train)
    batch_size = 32
    dl = x_train.map(partial(resize_image, res), num_parallel_calls=tf.data.AUTOTUNE)
    dl = dl.shuffle(200).batch(batch_size, drop_remainder=True).prefetch(1).repeat()
    return dl
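A quick check of the loader (optional): at the starting resolution, each batch is shaped (32, 4, 4, 3) with values in [-1, 1]:

batch = next(iter(create_dataloader(4)))
print(batch.shape)  # (32, 4, 4, 3)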
Define the model by providing the number of filters for each resolution (log 2):
gan = StyleGAN(
    target_res=32,
    start_res=4,
    filter_nums={0: 32, 1: 32, 2: 32, 3: 32, 4: 32, 5: 32},
)
opt_cfg = {"learning_rate": 1e-3, "beta_1": 0.0, "beta_2": 0.99, "epsilon": 1e-8}
start_res_log2 = 2  # 2 ** 2 = 4, the starting resolution
target_res_log2 = 5  # 2 ** 5 = 32, the target resolution
Train the Style GAN:
for res_log2 in range(start_res_log2, target_res_log2 + 1):
    res = 2 ** res_log2
    for phase in ["TRANSITION", "STABLE"]:
        # there is nothing to fade in from at the starting resolution,
        # so the transition phase is skipped there
        if res == 4 and phase == "TRANSITION":
            continue
        train_dl = create_dataloader(res)
        steps = 10
        gan.compile(
            d_optimizer=tf.keras.optimizers.Adam(**opt_cfg),
            g_optimizer=tf.keras.optimizers.Adam(**opt_cfg),
            loss_weights={"gradient_penalty": 10, "drift": 0.001},
            steps_per_epoch=steps,
            res=res,
            phase=phase,
            run_eagerly=False,
        )
        print(phase)
        history = gan.fit(train_dl, epochs=1, steps_per_epoch=steps)
Cycle GAN
Just provide your generator and discriminator models, and train your GAN.
Data Preparation:
import tensorflow_datasets as tfds
import tensorflow as tf
from tensorflow import keras
from pyradox_generative import CycleGAN
tfds.disable_progress_bar()
autotune = tf.data.AUTOTUNE
orig_img_size = (286, 286)
input_img_size = (256, 256, 3)
def normalize_img(img):
    # scale pixel values from [0, 255] to [-1, 1]
    img = tf.cast(img, dtype=tf.float32)
    return (img / 127.5) - 1.0

def preprocess_train_image(img, label):
    # random jitter: flip, upscale to 286x286, then randomly crop back to 256x256
    img = tf.image.random_flip_left_right(img)
    img = tf.image.resize(img, [*orig_img_size])
    img = tf.image.random_crop(img, size=[*input_img_size])
    img = normalize_img(img)
    return img

def preprocess_test_image(img, label):
    img = tf.image.resize(img, [input_img_size[0], input_img_size[1]])
    img = normalize_img(img)
    return img
train_horses, _ = tfds.load(
    "cycle_gan/horse2zebra", with_info=True, as_supervised=True, split="trainA[:5%]"
)
train_zebras, _ = tfds.load(
    "cycle_gan/horse2zebra", with_info=True, as_supervised=True, split="trainB[:5%]"
)
buffer_size = 256
batch_size = 1
train_horses = (
    train_horses.map(preprocess_train_image, num_parallel_calls=autotune)
    .cache()
    .shuffle(buffer_size)
    .batch(batch_size)
)
train_zebras = (
    train_zebras.map(preprocess_train_image, num_parallel_calls=autotune)
    .cache()
    .shuffle(buffer_size)
    .batch(batch_size)
)
Define the generator and discriminator models:
def build_generator(name):
    return keras.models.Sequential(
        [
            keras.layers.Input(shape=input_img_size),
            keras.layers.Conv2D(32, 3, activation="relu", padding="same"),
            keras.layers.Conv2D(32, 3, activation="relu", padding="same"),
            keras.layers.Conv2D(3, 3, activation="tanh", padding="same"),
        ],
        name=name,
    )

def build_discriminator(name):
    return keras.models.Sequential(
        [
            keras.layers.Input(shape=input_img_size),
            keras.layers.Conv2D(32, 3, activation="relu", padding="same"),
            keras.layers.MaxPooling2D(pool_size=2, strides=2),
            keras.layers.Conv2D(32, 3, activation="relu", padding="same"),
            keras.layers.MaxPooling2D(pool_size=2, strides=2),
            keras.layers.Conv2D(32, 3, activation="relu", padding="same"),
            keras.layers.MaxPooling2D(pool_size=2, strides=2),
            keras.layers.Conv2D(1, 3, activation="relu", padding="same"),
        ],
        name=name,
    )
Plug the models into the trainer class and train them using the familiar compile and fit methods:
gan = CycleGAN(
    generator_g=build_generator("gen_G"),
    generator_f=build_generator("gen_F"),
    discriminator_x=build_discriminator("disc_X"),
    discriminator_y=build_discriminator("disc_Y"),
)
gan.compile(
    gen_g_optimizer=keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5),
    gen_f_optimizer=keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5),
    disc_x_optimizer=keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5),
    disc_y_optimizer=keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5),
)
history = gan.fit(
    tf.data.Dataset.zip((train_horses, train_zebras)),
)
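After training, the generators translate between the two domains. A minimal sketch, assuming you kept a reference to the generator before constructing the trainer (e.g. gen_g = build_generator("gen_G"), passed in as generator_g above):

sample_horse = next(iter(train_horses))           # one batch, shape (1, 256, 256, 3)
fake_zebra = gen_g(sample_horse, training=False)  # horse -> zebra (gen_g is the assumed reference)
fake_zebra = (fake_zebra + 1.0) / 2.0             # map tanh output back to [0, 1]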
References
- Generative Adversarial Networks
- Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks
- Conditional Generative Adversarial Nets
- Wasserstein GAN
- Improved Training of Wasserstein GANs
- An Introduction to Variational Autoencoders
- A Style-Based Generator Architecture for Generative Adversarial Networks
- Unpaired Image-to-Image Translation using Cycle-Consistent Adversarial Networks
File details
Details for the file pyradox-generative-1.0.2.tar.gz.
File metadata
- Download URL: pyradox-generative-1.0.2.tar.gz
- Upload date:
- Size: 15.7 kB
- Tags: Source
- Uploaded using Trusted Publishing? No
- Uploaded via: twine/3.8.0 pkginfo/1.8.2 readme-renderer/32.0 requests/2.25.1 requests-toolbelt/0.9.1 urllib3/1.26.4 tqdm/4.59.0 importlib-metadata/4.10.0 keyring/22.3.0 rfc3986/2.0.0 colorama/0.4.4 CPython/3.8.8
File hashes
| Algorithm | Hash digest |
|---|---|
| SHA256 | f53be22cc0e201c550434002a0ca31cf05cf390678a9398d0f316fc539c00663 |
| MD5 | e87649c46c52ea3d74e24289cc7c5119 |
| BLAKE2b-256 | 3aa4a2b1815a2885c74b2517956a37c0373e8fbde88a45821b858f90f23a0177 |
File details
Details for the file pyradox_generative-1.0.2-py3-none-any.whl.
File metadata
- Download URL: pyradox_generative-1.0.2-py3-none-any.whl
- Upload date:
- Size: 17.1 kB
- Tags: Python 3
- Uploaded using Trusted Publishing? No
- Uploaded via: twine/3.8.0 pkginfo/1.8.2 readme-renderer/32.0 requests/2.25.1 requests-toolbelt/0.9.1 urllib3/1.26.4 tqdm/4.59.0 importlib-metadata/4.10.0 keyring/22.3.0 rfc3986/2.0.0 colorama/0.4.4 CPython/3.8.8
File hashes
| Algorithm | Hash digest |
|---|---|
| SHA256 | 465df4864779acfdbe246521c067bdeffa3ef1ba8e65617e60d624008c0a14f3 |
| MD5 | 6cd720c07b4d8a97a01d73dbdb29ece5 |
| BLAKE2b-256 | 6f78fe70a48188d488c41a83360a95730eaed842a642cc0369443c05e9cb5f96 |