Week-7
Applying autoencoder algorithms for encoding real-world data
Program
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
# Generate or load real-world data (using synthetic data for demonstration)
data_size = 1000
feature_size = 20
data = np.random.rand(data_size, feature_size)
# Scale the data
scaler = MinMaxScaler()
data_scaled = scaler.fit_transform(data)
# Split data into training and testing sets
train_data, test_data = train_test_split(data_scaled, test_size=0.2, random_state=42)
# Autoencoder architecture
encoding_dim = 10 # Dimensionality of encoded data
# Input layer
input_layer = Input(shape=(feature_size,))
# Encoder layers
encoded = Dense(15, activation='relu')(input_layer)
encoded = Dense(encoding_dim, activation='relu')(encoded)
# Decoder layers
decoded = Dense(15, activation='relu')(encoded)
decoded = Dense(feature_size, activation='sigmoid')(decoded)
# Define the autoencoder model
autoencoder = Model(inputs=input_layer, outputs=decoded)
autoencoder.compile(optimizer='adam', loss='mse')
# Train the autoencoder
epochs = 50
batch_size = 32
history = autoencoder.fit(
    train_data, train_data,
    epochs=epochs,
    batch_size=batch_size,
    shuffle=True,
    validation_data=(test_data, test_data))
# Define the encoder model
encoder = Model(inputs=input_layer, outputs=encoded)
# Encode the test data
encoded_data = encoder.predict(test_data)
# Reconstruct the test data with the full autoencoder (for validation)
decoded_data = autoencoder.predict(test_data)
print("Original Test Data:", test_data[:5])
print("Encoded Test Data:", encoded_ data[:5])
print("Decoded Test Data:", decoded_data[:5])
Output:
Epoch 1/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 7s 30ms/step - loss: 0.0876 - val_loss: 0.0825
Epoch 2/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 0.0841 - val_loss: 0.0811
Epoch 3/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 0.0821 - val_loss: 0.0800
Epoch 4/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 0.0817 - val_loss: 0.0787
Epoch 5/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 0.0796 - val_loss: 0.0773
Epoch 6/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 0.0782 - val_loss: 0.0758
...
Epoch 48/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.0481 - val_loss: 0.0489
Epoch 49/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.0483 - val_loss: 0.0489
Epoch 50/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.0473 - val_loss: 0.0487
7/7 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step
7/7 ━━━━━━━━━━━━━━━━━━━━ 0s 21ms/step
Original Test Data: [[0.13894161 0.44540135 0.17539685 0.87496466 0.54381249 0.89209006
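Note: the program above runs on synthetic data for demonstration. To encode an actual real-world dataset, the random matrix can be swapped for a table loaded with pandas (already imported above); the following sketch assumes a hypothetical all-numeric CSV named data.csv, and every step after scaling stays unchanged. Reconstruction quality can then be judged from the per-sample mean squared error.
# Hypothetical example: load a real-world numeric dataset instead of the synthetic matrix
data_df = pd.read_csv("data.csv")  # assumed file name; any all-numeric CSV works
data = data_df.select_dtypes(include=[np.number]).values
feature_size = data.shape[1]  # width of the autoencoder's input and output layers
data_scaled = MinMaxScaler().fit_transform(data)
train_data, test_data = train_test_split(data_scaled, test_size=0.2, random_state=42)
# After training, per-sample reconstruction error shows how well the encoding preserves the data
reconstruction_error = np.mean((test_data - autoencoder.predict(test_data)) ** 2, axis=1)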
Week-8
Applying Generative Adversarial Networks for image generation and unsupervised tasks
Program
import tensorflow as tf
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Flatten, Reshape, LeakyReLU, BatchNormalization
from tensorflow.keras.datasets import mnist
import numpy as np
import matplotlib.pyplot as plt
# Load MNIST dataset
(x_train, _), (_, _) = mnist.load_data()
x_train = (x_train - 127.5) / 127.5 # Normalize to [-1, 1] to match the generator's tanh output
x_train = x_train.reshape((-1, 28, 28)) # Keep images as 28x28; the discriminator's Flatten layer handles flattening
# Hyperparameters
latent_dim = 100 # Size of random noise vector
batch_size = 128
epochs = 10000
display_interval = 1000
# Build the generator
def build_generator():
    model = Sequential([
        Dense(256, input_dim=latent_dim),
        LeakyReLU(alpha=0.2),
        BatchNormalization(momentum=0.8),
        Dense(512),
        LeakyReLU(alpha=0.2),
        BatchNormalization(momentum=0.8),
        Dense(1024),
        LeakyReLU(alpha=0.2),
        BatchNormalization(momentum=0.8),
        Dense(28 * 28, activation='tanh'),
        Reshape((28, 28))
    ])
    return model
# Build the discriminator
def build_discriminator():
    model = Sequential([
        Flatten(input_shape=(28, 28)),
        Dense(512),
        LeakyReLU(alpha=0.2),
        Dense(256),
        LeakyReLU(alpha=0.2),
        Dense(1, activation='sigmoid')
    ])
    return model
# Build and compile the discriminator
discriminator = build_discriminator()
discriminator.compile(optimizer=tf.keras.optimizers.Adam(0.0002, 0.5),
                      loss='binary_crossentropy',
                      metrics=['accuracy'])
# Build the generator
generator = build_generator()
# Build the GAN by stacking generator and discriminator
random_input = tf.keras.Input(shape=(latent_dim,))
generated_image = generator(random_input)
discriminator.trainable = False # Freeze discriminator during generator training
validity = discriminator(generated_image)
gan = Model(random_input, validity)
gan.compile(optimizer=tf.keras.optimizers.Adam(0.0002, 0.5), loss='binary_crossentropy')
# Training the GAN
def train(epochs, batch_size, display_interval):
    valid = np.ones((batch_size, 1))  # Labels for real images
    fake = np.zeros((batch_size, 1))  # Labels for fake images
    for epoch in range(epochs):
        # Train the discriminator on a real batch and a generated batch
        idx = np.random.randint(0, x_train.shape[0], batch_size)
        real_images = x_train[idx]
        noise = np.random.normal(0, 1, (batch_size, latent_dim))
        generated_images = generator.predict(noise)
        d_loss_real = discriminator.train_on_batch(real_images, valid)
        d_loss_fake = discriminator.train_on_batch(generated_images, fake)
        d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
        # Train the generator to fool the discriminator
        noise = np.random.normal(0, 1, (batch_size, latent_dim))
        g_loss = gan.train_on_batch(noise, valid)
        # Display progress
        if epoch % display_interval == 0:
            print(f"{epoch} [D loss: {d_loss[0]:.4f}, acc.: {100 * d_loss[1]:.2f}%] [G loss: {g_loss:.4f}]")
            display_images(generator, epoch)
# Display generated images
def display_images(generator, epoch, examples=5):
    noise = np.random.normal(0, 1, (examples * examples, latent_dim))
    generated_images = generator.predict(noise)
    generated_images = 0.5 * generated_images + 0.5  # Rescale from [-1, 1] to [0, 1]
    fig, axs = plt.subplots(examples, examples)
    count = 0
    for i in range(examples):
        for j in range(examples):
            axs[i, j].imshow(generated_images[count, :, :], cmap='gray')
            axs[i, j].axis('off')
            count += 1
    plt.show()
# Train GAN
train(epochs, batch_size, display_interval)
Output:
0 [D loss: 0.6921, acc.: 50.00%] [G loss: 0.7332]
1000 [D loss: 0.2583, acc.: 93.16%] [G loss: 2.1025]
2000 [D loss: 0.1421, acc.: 95.34%] [G loss: 2.8049]
3000 [D loss: 0.1043, acc.: 98.21%] [G loss: 3.2345]
...
9000 [D loss: 0.0642, acc.: 99.12%] [G loss: 4.6521]
(Grids of generated digit images are displayed at epoch 0, epoch 1000, epoch 5000+, and epoch 10000.)
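Once training finishes, the generator alone covers the unsupervised image-generation task. The lines below are a minimal sketch (not part of the recorded run) showing how a batch of new digits could be sampled from the trained generator and saved with matplotlib; the output file name samples.png is an assumption.
# Minimal sketch: sample 25 new digits from the trained generator and save them to disk
noise = np.random.normal(0, 1, (25, latent_dim))  # 25 random latent vectors
samples = generator.predict(noise)  # shape (25, 28, 28), values in [-1, 1] from tanh
samples = 0.5 * samples + 0.5  # rescale to [0, 1] for display
fig, axs = plt.subplots(5, 5, figsize=(5, 5))
for k in range(25):
    axs[k // 5, k % 5].imshow(samples[k], cmap='gray')
    axs[k // 5, k % 5].axis('off')
plt.savefig("samples.png")  # assumed output file name
plt.close(fig)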