Deep Learning Experiments
EXPERIMENT NO:-01
Aim:- Training a feedforward neural network on the MNIST dataset.
Program:-
from tensorflow.keras.datasets import mnist

# grab the MNIST dataset (if this is your first time using this
# dataset then the 11MB download may take a minute)
print("[INFO] accessing MNIST...")
((trainX, trainY), (testX, testY)) = mnist.load_data()
# each image in the MNIST dataset is represented as a 28x28x1 image
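The flattening and scaling step did not survive in this copy; a minimal sketch consistent with the 784-input architecture defined below:
# flatten each 28x28 image into a 784-dim vector and scale pixels to [0, 1]
trainX = trainX.reshape((trainX.shape[0], 28 * 28)).astype("float32") / 255.0
testX = testX.reshape((testX.shape[0], 28 * 28)).astype("float32") / 255.0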
Each integer label is one-hot encoded as a 10-dimensional binary vector:
0: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0]
1: [0, 1, 0, 0, 0, 0, 0, 0, 0, 0]
2: [0, 0, 1, 0, 0, 0, 0, 0, 0, 0]
3: [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]
4: [0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
5: [0, 0, 0, 0, 0, 1, 0, 0, 0, 0]
6: [0, 0, 0, 0, 0, 0, 1, 0, 0, 0]
7: [0, 0, 0, 0, 0, 0, 0, 1, 0, 0]
8: [0, 0, 0, 0, 0, 0, 0, 0, 1, 0]
9: [0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
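One way to produce these vectors (a sketch; the original may instead have used scikit-learn's LabelBinarizer):
from tensorflow.keras.utils import to_categorical
trainY = to_categorical(trainY, num_classes=10)
testY = to_categorical(testY, num_classes=10)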
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

# define the 784-256-128-10 architecture using Keras
model = Sequential()
model.add(Dense(256, input_shape=(784,), activation="sigmoid"))
model.add(Dense(128, activation="sigmoid"))
model.add(Dense(10, activation="softmax"))
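The compile, train, and evaluate cells are missing from this copy; a minimal sketch that would produce a log and report like those below (SGD with learning rate 0.01 and batch size 128 are assumptions):
from tensorflow.keras.optimizers import SGD
from sklearn.metrics import classification_report

model.compile(loss="categorical_crossentropy", optimizer=SGD(0.01),
              metrics=["accuracy"])
H = model.fit(trainX, trainY, validation_data=(testX, testY),
              epochs=100, batch_size=128)

print("[INFO] evaluating network...")
predictions = model.predict(testX, batch_size=128)
print(classification_report(testY.argmax(axis=1),
                            predictions.argmax(axis=1)))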
Epoch 98/100
1s - loss: 0.2811 - acc: 0.9199 - val_loss: 0.2857 - val_acc: 0.9153
Epoch 99/100
1s - loss: 0.2802 - acc: 0.9201 - val_loss: 0.2862 - val_acc: 0.9148
Epoch 100/100
1s - loss: 0.2792 - acc: 0.9204 - val_loss: 0.2844 - val_acc: 0.9160
[INFO] evaluating network...
precision recall f1-score support
0.0 0.94 0.96 0.95 1726
1.0 0.95 0.97 0.96 2004
EXPERIMENT NO:-02
Aim:- Applying a Convolutional Neural Network to a computer vision problem (CIFAR-10).
Program:-
import pandas as pd
import numpy as np
import cPickle  # Python 2 pickle module (this experiment uses legacy Python 2 / GraphLab code)
#Define a function to load each batch as a dictionary:
def unpickle(file):
    fo = open(file, 'rb')
    dict = cPickle.load(fo)
    fo.close()
    return dict
#Make dictionaries by calling the above function:
batch1 = unpickle('data/data_batch_1')
batch2 = unpickle('data/data_batch_2')
batch3 = unpickle('data/data_batch_3')
batch4 = unpickle('data/data_batch_4')
batch5 = unpickle('data/data_batch_5')
batch_test = unpickle('data/test_batch')
#Define a function to convert this dictionary into a dataframe with image pixel array and labels:
def get_dataframe(batch):
    df = pd.DataFrame(batch['data'])
    df['image'] = df.as_matrix().tolist()
    df.drop(range(3072), axis=1, inplace=True)
    df['label'] = batch['labels']
    return df
#Define train and test files:
train = pd.concat([get_dataframe(batch1), get_dataframe(batch2),
                   get_dataframe(batch3), get_dataframe(batch4),
                   get_dataframe(batch5)], ignore_index=True)
test = get_dataframe(batch_test)
# We can verify this data by looking at the head and shape of the data as follows:
print train.head()
OUTPUT:-
print train.shape, test.shape
OUTPUT:-
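Each row of 'data' holds 3072 values (three 32x32 color planes stored channel-major). A sketch of how one such row could be reshaped for display (row_to_image is a hypothetical helper, not part of the original listing):
def row_to_image(row):
    # reorder CIFAR-10's consecutive R, G, B planes into an HxWxC array
    return np.array(row).reshape(3, 32, 32).transpose(1, 2, 0)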
import graphlab as gl
gltrain = gl.SFrame(train)
gltest = gl.SFrame(test)
model = gl.neuralnet_classifier.create(gltrain, target='label', validation_set=None)
OUTPUT:-
model.evaluate(gltest)
OUTPUT:-
#Convert into 256x256 size
gltrain['image'] = gl.image_analysis.resize(gltrain['glimage'], 256, 256, 3)
gltest['image'] = gl.image_analysis.resize(gltest['glimage'], 256, 256, 3)
#Remove old column:
gltrain.remove_column('glimage')
gltest.remove_column('glimage')
gltrain.head()
OUTPUT:-
simple_classifier = gl.neuralnet_classifier.create(gltrain, target='label', validation_set=None)
simple_classifier.evaluate(gltest)
OUTPUT:-
EXPERIMENT NO:-03
AIM:- Image classification on the MNIST dataset (CNN model with fully connected layer).
Program:
1. Import packages:
import numpy as np
import cv2
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam, SGD
from keras.utils import np_utils
2. Load dataset:-
This is a dataset of 60,000 28x28 grayscale images of the 10 digits, along with a test set of 10,000 images.
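The loading cell itself is missing here; presumably the built-in Keras loader was used, as below:
(X_train, y_train), (X_test, y_test) = tf.keras.datasets.mnist.load_data()
print(y_train.shape)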
Total no. of labels:
(60000,)
print('Label:', y_train[0])
Label: 5
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
# flatten each 28x28 image into a 784-dimensional vector
X_train = X_train.reshape(-1, 784)
X_test = X_test.reshape(-1, 784)
print(X_train.shape, X_test.shape)
output:
(60000, 784) (10000, 784)
X_train = X_train/255
X_test = X_test/255
# print(X_train[0])
X_train.shape
Output:
(60000, 784)
# One-hot encoding
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
print(y_train.shape)
output:
(60000, 10)
In [8]:
num_classes = y_test.shape[1]
num_pixels = 784
In [9]:
def baseline_model():
    # create a simple fully connected model
    model = Sequential()
    model.add(Dense(64, input_shape=(num_pixels,), activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    return model
In [10]:
model = baseline_model()
model.summary()
out:
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
dense (Dense)                (None, 64)                50240
dense_1 (Dense)              (None, 10)                650
=================================================================
Total params: 50,890
Trainable params: 50,890
Non-trainable params: 0
In [11]:
5. Train Model
In [12]:
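The training cell is blank in this copy; a minimal sketch consistent with the five-epoch log and the History object below (batch size is an assumption):
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=5, batch_size=200)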
out:
Epoch 1/5
Epoch 2/5
Epoch 3/5
Epoch 4/5
Epoch 5/5
<tensorflow.python.keras.callbacks.History at 0x7f0d68b03c10>
6. Test Model
In [13]:
scores = model.evaluate(X_test, y_test, verbose=1)
print("Error: %.2f%%" % (100-scores[1]*100))
313/313 [==============================] - 0s 1ms/step - loss: 0.3892 - accuracy: 0.8957
Error: 10.43%
7. Predict
In [14]:
img_width, img_height = 28, 28
In [15]:
ii = cv2.imread("../input/mnistpredict/3.png")
gray_image = cv2.cvtColor(ii, cv2.COLOR_BGR2GRAY)  # the grayscale-conversion step was missing from this copy
# print(gray_image)
plt.imshow(gray_image, cmap='Greys')
plt.show()
# gray_image.shape
x = np.expand_dims(gray_image, axis=0)
x = x.reshape((1, -1))
In [16]:
preds = model.predict_classes(x)
prob = model.predict_proba(x)
print("Predicted value is", preds[0])
Predicted value is 3
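Note that predict_classes and predict_proba were removed in later TensorFlow releases; on current versions the equivalent is:
probs = model.predict(x)
preds = np.argmax(probs, axis=1)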
EXPERIMENT NO:-04
AIM: Build and train a simple autoencoder for image reconstruction on the MNIST dataset.
Program:
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras import layers, losses
from tensorflow.keras.models import Model

(x_train, _), (x_test, _) = tf.keras.datasets.mnist.load_data()
print('Shape of the training data:', x_train.shape)
print('Shape of the testing data:', x_test.shape)
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
class SimpleAutoencoder(Model):
    def __init__(self, latent_dimensions, data_shape):
        super(SimpleAutoencoder, self).__init__()
        self.latent_dimensions = latent_dimensions
        self.data_shape = data_shape
        self.encoder = tf.keras.Sequential([
            layers.Flatten(),
            layers.Dense(latent_dimensions, activation='relu'),
        ])
        self.decoder = tf.keras.Sequential([
            layers.Dense(tf.math.reduce_prod(data_shape), activation='sigmoid'),
            layers.Reshape(data_shape)
        ])

    def call(self, input_data):
        encoded_data = self.encoder(input_data)
        decoded_data = self.decoder(encoded_data)
        return decoded_data
input_data_shape = x_test.shape[1:]
latent_dimensions = 64  # size of the compressed representation (the original value was lost; 64 is assumed)
simple_autoencoder = SimpleAutoencoder(latent_dimensions, input_data_shape)
simple_autoencoder.compile(optimizer='adam', loss=losses.MeanSquaredError())
simple_autoencoder.fit(x_train, x_train,
                       epochs=10,
                       shuffle=True,
                       validation_data=(x_test, x_test))
encoded_imgs = simple_autoencoder.encoder(x_test).numpy()
decoded_imgs = simple_autoencoder.decoder(encoded_imgs).numpy()
n = 6
plt.figure(figsize=(8, 4))
for i in range(n):
    # display original
    ax = plt.subplot(2, n, i + 1)
    plt.imshow(x_test[i])
    plt.title("original")
    plt.gray()
    # display reconstruction
    ax = plt.subplot(2, n, i + 1 + n)
    plt.imshow(decoded_imgs[i])
    plt.title("reconstructed")
    plt.gray()
plt.show()
Output:
Shape of the training data: (60000, 28, 28)
Shape of the testing data: (10000, 28, 28)
Epoch 1/10
1875/1875 [==============================] - 12s 6ms/step - loss: 0.0243 - val_loss: 0.0091
Epoch 2/10
1875/1875 [==============================] - 16s 9ms/step - loss: 0.0069 - val_loss: 0.0054
Epoch 3/10
1875/1875 [==============================] - 15s 8ms/step - loss: 0.0051 - val_loss: 0.0046
Epoch 4/10
1875/1875 [==============================] - 8s 5ms/step - loss: 0.0045 - val_loss: 0.0043
Epoch 5/10
1875/1875 [==============================] - 8s 4ms/step - loss: 0.0043 - val_loss: 0.0041
Epoch 6/10
1875/1875 [==============================] - 9s 5ms/step - loss: 0.0042 - val_loss: 0.0041
Epoch 7/10
1875/1875 [==============================] - 7s 4ms/step - loss: 0.0041 - val_loss: 0.0040
EXPERIMENT NO:-05
Aim: Applying Generative Adversarial Networks for image generation and unsupervised tasks.
Program:
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import numpy as np
import matplotlib.pyplot as plt

# Set device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

transform = transforms.Compose([
    transforms.ToTensor(),
])
train_dataset = datasets.CIFAR10(root='./data', train=True,
                                 download=True, transform=transform)
dataloader = torch.utils.data.DataLoader(train_dataset,
                                         batch_size=32, shuffle=True)
# Hyperparameters
latent_dim = 100
lr = 0.0002
beta1 = 0.5
beta2 = 0.999
num_epochs = 10
class Generator(nn.Module):
    def __init__(self, latent_dim):
        super(Generator, self).__init__()
        self.model = nn.Sequential(
            nn.Linear(latent_dim, 128 * 8 * 8),
            nn.ReLU(),
            nn.Unflatten(1, (128, 8, 8)),
            nn.Upsample(scale_factor=2),
            nn.Conv2d(128, 128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128, momentum=0.78),
            nn.ReLU(),
            nn.Upsample(scale_factor=2),
            nn.Conv2d(128, 64, kernel_size=3, padding=1),
            nn.BatchNorm2d(64, momentum=0.78),
            nn.ReLU(),
            nn.Conv2d(64, 3, kernel_size=3, padding=1),
            nn.Tanh()
        )

    def forward(self, z):
        img = self.model(z)
        return img
class Discriminator(nn.Module):
    def __init__(self):
        super(Discriminator, self).__init__()
        self.model = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=3, stride=2, padding=1),
            nn.LeakyReLU(0.2),
            nn.Dropout(0.25),
            nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1),
            nn.ZeroPad2d((0, 1, 0, 1)),
            nn.BatchNorm2d(64, momentum=0.82),
            nn.LeakyReLU(0.25),
            nn.Dropout(0.25),
            nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1),
            nn.BatchNorm2d(128, momentum=0.82),
            nn.LeakyReLU(0.2),
            nn.Dropout(0.25),
            nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(256, momentum=0.8),
            nn.LeakyReLU(0.25),
            nn.Dropout(0.25),
            nn.Flatten(),
            nn.Linear(256 * 5 * 5, 1),
            nn.Sigmoid()
        )

    def forward(self, img):
        validity = self.model(img)
        return validity
generator = Generator(latent_dim).to(device)
discriminator = Discriminator().to(device)
# Loss function
adversarial_loss = nn.BCELoss()
# Optimizers
optimizer_G = optim.Adam(generator.parameters(),
                         lr=lr, betas=(beta1, beta2))
optimizer_D = optim.Adam(discriminator.parameters(),
                         lr=lr, betas=(beta1, beta2))
# Training loop
for epoch in range(num_epochs):
    for i, batch in enumerate(dataloader):
        real_images = batch[0].to(device)

        # Adversarial ground truths
        valid = torch.ones(real_images.size(0), 1, device=device)
        fake = torch.zeros(real_images.size(0), 1, device=device)
        real_images = real_images.to(device)

        # Train Discriminator
        optimizer_D.zero_grad()
        # Sample noise as generator input
        z = torch.randn(real_images.size(0), latent_dim, device=device)
        fake_images = generator(z)

        real_loss = adversarial_loss(discriminator(real_images), valid)
        fake_loss = adversarial_loss(discriminator(fake_images.detach()), fake)
        d_loss = (real_loss + fake_loss) / 2
        d_loss.backward()
        optimizer_D.step()

        # Train Generator
        optimizer_G.zero_grad()
        # Generate a batch of images
        gen_images = generator(z)
        # Adversarial loss
        g_loss = adversarial_loss(discriminator(gen_images), valid)
        g_loss.backward()
        optimizer_G.step()

        # Progress Monitoring
        if (i + 1) % 100 == 0:
            print(
                f"Epoch [{epoch+1}/{num_epochs}] "
                f"Batch {i+1}/{len(dataloader)} "
                f"Discriminator Loss: {d_loss.item():.4f} "
                f"Generator Loss: {g_loss.item():.4f}"
            )

    # Show a grid of generated images every 10th epoch
    if (epoch + 1) % 10 == 0:
        with torch.no_grad():
            z = torch.randn(16, latent_dim, device=device)
            generated = generator(z).detach().cpu()
            grid = torchvision.utils.make_grid(generated,
                                               nrow=4, normalize=True)
            plt.imshow(np.transpose(grid.numpy(), (1, 2, 0)))
            plt.axis("off")
            plt.show()
Output:
EXPERIMENT NO:-06
AIM: Build and train a Recurrent Neural Network from scratch (using NumPy) for sequence prediction on a sine wave.
Program:
import math
import numpy as np
import matplotlib.pyplot as plt

sin_wave = np.array([math.sin(x) for x in np.arange(200)])
plt.plot(sin_wave[:50])
seq_len = 50
num_records = len(sin_wave) - seq_len
# training windows (the construction of X and Y mirrors the validation set below)
X = np.expand_dims(np.array([sin_wave[i:i+seq_len] for i in range(num_records - seq_len)]), axis=2)
Y = np.expand_dims(np.array([sin_wave[i+seq_len] for i in range(num_records - seq_len)]), axis=1)
# validation windows (the last seq_len records)
X_val = []
Y_val = []
for i in range(num_records - seq_len, num_records):
    X_val.append(sin_wave[i:i+seq_len])
    Y_val.append(sin_wave[i+seq_len])
X_val = np.array(X_val)
Y_val = np.array(Y_val)
X_val = np.expand_dims(X_val, axis=2)
Y_val = np.expand_dims(Y_val, axis=1)
nepoch = 25
T = 50              # length of sequence
hidden_dim = 100
output_dim = 1
learning_rate = 0.0001
bptt_truncate = 5
min_clip_value = -10
max_clip_value = 10

# weight matrices: U (input -> hidden), W (hidden -> hidden), V (hidden -> output)
U = np.random.uniform(0, 1, (hidden_dim, T))
W = np.random.uniform(0, 1, (hidden_dim, hidden_dim))
V = np.random.uniform(0, 1, (output_dim, hidden_dim))

def sigmoid(x):
    return 1 / (1 + np.exp(-x))
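The clip thresholds above guard against exploding gradients. A sketch of how they are typically applied to each gradient matrix during training (clip_gradients is a hypothetical helper; the original clipping lines were lost):
def clip_gradients(d):
    # clamp every entry of a gradient matrix into [min_clip_value, max_clip_value]
    d[d > max_clip_value] = max_clip_value
    d[d < min_clip_value] = min_clip_value
    return d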
for epoch in range(nepoch):
    # check loss on train
    loss = 0.0
    for i in range(Y.shape[0]):
        x, y = X[i], Y[i]
        prev_s = np.zeros((hidden_dim, 1))  # prev_s holds the previous hidden-layer activation, initialized to all zeros
        for t in range(T):
            new_input = np.zeros(x.shape)   # forward pass for every timestep in the sequence
            new_input[t] = x[t]             # define a single input for this timestep
            mulu = np.dot(U, new_input)
            mulw = np.dot(W, prev_s)
            add = mulw + mulu
            s = sigmoid(add)
            mulv = np.dot(V, s)
            prev_s = s
        # calculate error
        loss_per_record = (y - mulv)**2 / 2
        loss += loss_per_record
    # check loss on validation
    val_loss = 0.0
    for i in range(Y_val.shape[0]):
        x, y = X_val[i], Y_val[i]
        prev_s = np.zeros((hidden_dim, 1))
        for t in range(T):
            new_input = np.zeros(x.shape)
            new_input[t] = x[t]
            mulu = np.dot(U, new_input)
            mulw = np.dot(W, prev_s)
            add = mulw + mulu
            s = sigmoid(add)
            mulv = np.dot(V, s)
            prev_s = s
        loss_per_record = (y - mulv)**2 / 2
        val_loss += loss_per_record
    print('Epoch: ', epoch + 1, ', Loss: ', loss, ', Val Loss: ', val_loss)
...
    # train model: one forward + backward pass per training record
    for i in range(Y.shape[0]):
        x, y = X[i], Y[i]
        layers = []
        prev_s = np.zeros((hidden_dim, 1))
        dU = np.zeros(U.shape)
        dV = np.zeros(V.shape)
        dW = np.zeros(W.shape)
        dU_t = np.zeros(U.shape)
        dV_t = np.zeros(V.shape)
        dW_t = np.zeros(W.shape)
        dU_i = np.zeros(U.shape)
        dW_i = np.zeros(W.shape)
        # forward pass (store each timestep's activations for backprop)
        for t in range(T):
            new_input = np.zeros(x.shape)
            new_input[t] = x[t]
            mulu = np.dot(U, new_input)
            mulw = np.dot(W, prev_s)
            add = mulw + mulu
            s = sigmoid(add)
            mulv = np.dot(V, s)
            layers.append({'s': s, 'prev_s': prev_s})
            prev_s = s
        # derivative of the squared-error loss w.r.t. the prediction
        dmulv = (mulv - y)
        # backward pass (truncated backpropagation through time)
        for t in range(T):
            dV_t = np.dot(dmulv, np.transpose(layers[t]['s']))
            dsv = np.dot(np.transpose(V), dmulv)
            ds = dsv
            ...
            for step in range(t-1, max(-1, t-bptt_truncate-1), -1):
                ds = dsv + dprev_s
                ...
                new_input = np.zeros(x.shape)
                new_input[t] = x[t]
                ...
                dx = np.dot(np.transpose(U), dmulu)
                dU_t += dU_i
                dW_t += dW_i
            dV += dV_t
            dU += dU_t
            dW += dW_t
        # update weights
        U -= learning_rate * dU
        V -= learning_rate * dV
        W -= learning_rate * dW
On training the above model, we get this output:
Epoch: 1 , Loss: [[101185.61756671]] , Val Loss: [[50591.0340148]]
Epoch: 2 , Loss: [[61205.46869629]] , Val Loss: [[30601.34535365]]
Epoch: 3 , Loss: [[31225.3198258]] , Val Loss: [[15611.65669247]]
Epoch: 4 , Loss: [[11245.17049551]] , Val Loss: [[5621.96780111]]
Epoch: 5 , Loss: [[1264.5157739]] , Val Loss: [[632.02563908]]
Epoch: 6 , Loss: [[20.15654115]] , Val Loss: [[10.05477285]]
Epoch: 7 , Loss: [[17.13622839]] , Val Loss: [[8.55190426]]
Epoch: 8 , Loss: [[17.38870495]] , Val Loss: [[8.68196484]]
Epoch: 9 , Loss: [[17.181681]] , Val Loss: [[8.57837827]]
Epoch: 10 , Loss: [[17.31275313]] , Val Loss: [[8.64199652]]
Epoch: 11 , Loss: [[17.12960034]] , Val Loss: [[8.54768294]]
Epoch: 12 , Loss: [[17.09020065]] , Val Loss: [[8.52993502]]
Epoch: 13 , Loss: [[17.17370113]] , Val Loss: [[8.57517454]]
Epoch: 14 , Loss: [[17.04906914]] , Val Loss: [[8.50658127]]
Epoch: 15 , Loss: [[16.96420184]] , Val Loss: [[8.46794248]]
Epoch: 16 , Loss: [[17.017519]] , Val Loss: [[8.49241316]]
Epoch: 17 , Loss: [[16.94199493]] , Val Loss: [[8.45748739]]
Epoch: 18 , Loss: [[16.99796892]] , Val Loss: [[8.48242177]]
Epoch: 19 , Loss: [[17.24817035]] , Val Loss: [[8.6126231]]
Epoch: 20 , Loss: [[17.00844599]] , Val Loss: [[8.48682234]]
Epoch: 21 , Loss: [[17.03943262]] , Val Loss: [[8.50437328]]
Epoch: 22 , Loss: [[17.01417255]] , Val Loss: [[8.49409597]]
Epoch: 23 , Loss: [[17.20918888]] , Val Loss: [[8.5854792]]
plt.show()
preds = []
for i in range(Y_val.shape[0]):
    x, y = X_val[i], Y_val[i]
    prev_s = np.zeros((hidden_dim, 1))
    for t in range(T):
        mulu = np.dot(U, x)
        mulw = np.dot(W, prev_s)
        add = mulw + mulu
        s = sigmoid(add)
        mulv = np.dot(V, s)
        prev_s = s
    preds.append(mulv)
preds = np.array(preds)

plt.plot(preds[:, 0, 0], 'g')   # predictions
plt.plot(Y_val[:, 0], 'r')      # actual values
plt.show()
OUTPUT:
0.127191931509431
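The value above is presumably the root-mean-squared error of the validation predictions; a sketch of how it could be computed (an assumption, as the computing cell is missing):
rmse = math.sqrt(np.mean((preds[:, 0, 0] - Y_val[:, 0])**2))
print(rmse)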
EXPERIMENT NO:-07
AIM: Build and train a Long Short-Term Memory (LSTM) model using TensorFlow and Keras.
Program:
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense

def generate_data(sequence_length, num_samples):
    ...
    return X, y

# Parameters
sequence_length = 10
num_samples = 1000

# Data preparation
X, y = generate_data(sequence_length, num_samples)

model = Sequential([
    LSTM(50, activation='relu', input_shape=(sequence_length, 1)),  # units are an assumption; the original layer spec was lost
    Dense(1)
])
model.compile(optimizer='adam', loss='mse')
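The body of generate_data and the training cell did not survive; a minimal sketch that could stand in for them, assuming a noisy sine-wave next-value task (the series, split, and batch size are assumptions; epochs=20 matches the log below):
def generate_data(sequence_length, num_samples):
    # sliding windows over a noisy sine wave; each window predicts the next value
    series = np.sin(np.linspace(0, 100, num_samples + sequence_length)) \
             + 0.1 * np.random.randn(num_samples + sequence_length)
    X = np.array([series[i:i + sequence_length] for i in range(num_samples)])
    y = np.array([series[i + sequence_length] for i in range(num_samples)])
    return X.reshape(num_samples, sequence_length, 1), y

# hold out the last 20% of the windows for evaluation
split = int(0.8 * num_samples)
X_train, X_test = X[:split], X[split:]
y_train, y_test = y[:split], y[split:]

history = model.fit(X_train, y_train, validation_data=(X_test, y_test),
                    epochs=20, batch_size=32)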
predictions = model.predict(X_test[:5])
print("Predictions:", predictions.flatten())
print("Actual:", y_test[:5])
plt.plot(history.history['loss'], label='Training Loss')
plt.plot(history.history['val_loss'], label='Validation Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
Output:
Training Progress:
Epoch 1/20
...
Epoch 20/20
Model Evaluation:
Sample Predictions:
Loss Plot: A graph showing the decreasing trend of training and validation loss over epochs.