DL Practical
Deep Learning Practicals (MSc IT)

INDEX

1. Performing matrix multiplication and finding eigenvectors and eigenvalues using TensorFlow (25/5/24)
2. Solving the XOR problem using a deep feed-forward network (2/6/24)
3. Implementing a deep neural network for performing a binary classification task (15/6/24)
4. a. Using a deep feed-forward network with two hidden layers for performing multiclass classification and predicting the class (22/6/24)
   b. Using a deep feed-forward network with two hidden layers for performing classification and predicting the probability of a class
   c. Using a deep feed-forward network with two hidden layers for performing linear regression and predicting values
5. a. Evaluating a feed-forward deep network for regression using KFold cross-validation (23/6/24)
   b. Evaluating a feed-forward deep network for multiclass classification using KFold cross-validation
6. Implementing regularization to avoid overfitting in binary classification (29/6/24)
7. Demonstrating a recurrent neural network that learns to perform sequence analysis for stock prices (30/6/24)
8. Performing encoding and decoding of images using a deep autoencoder (2/7/24)
9. Implementing a convolutional neural network to predict numbers from number images (3/7/24)
10. Denoising of images using an autoencoder (4/7/24)

PRACTICAL 1

Aim: Performing matrix multiplication and finding eigenvectors and eigenvalues using TensorFlow.

Code and Output:

!pip install tensorflow

import tensorflow as tf

print("*********************************")

print("Matrix Multiplication")

x = tf.constant([2,3,4,4,3,1],shape=[3,2])

print(x)

y = tf.constant([1,2,3,5,6,9],shape = [2,3])

print(y)

z = tf.matmul(x,y)

print(("Product : " ,z))


print("*********************************")

# Note: tf.linalg.eigh assumes a self-adjoint (symmetric) matrix and reads only its lower triangle
e_matrix_A=tf.random.uniform([2,2],minval=3,maxval=10,dtype=tf.float32,name="matrixA")

print("Matrix A:\n{}\n\n".format(e_matrix_A))

eigen_values_A,eigen_vectors_A=tf.linalg.eigh(e_matrix_A)

print("Eigen Vectors:\n{}\n\nEigen Values:\n{}\n".format(eigen_vectors_A,eigen_values_A))

PRACTICAL 2

Aim: Solving the XOR problem using a deep feed-forward network.

Code and Output:

import numpy as np

from keras.layers import Dense

from keras.models import Sequential

model=Sequential()
model.add(Dense(units=2,activation='relu',input_dim=2))

model.add(Dense(units=1,activation='sigmoid'))

model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])

print(model.summary())

print(model.get_weights())

X=np.array([[0.,0.],[0.,1.],[1.,0.],[1.,1.]])

Y=np.array([0.,1.,1.,0.])

model.fit(X,Y,epochs=1000,batch_size=4)

print(model.get_weights())

print(model.predict(X,batch_size=4))
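The final predict call prints sigmoid probabilities rather than hard labels. A small follow-up sketch (reusing model, X and Y from above) that thresholds the outputs and compares them with the XOR truth table:

import numpy as np

# Threshold the sigmoid outputs at 0.5 to obtain hard 0/1 predictions
probs = model.predict(X, batch_size=4)
preds = (probs > 0.5).astype(int).ravel()
print("Predicted:", preds, "Expected:", Y.astype(int))

With only two ReLU units the network can occasionally get stuck near 50% accuracy depending on the random initialization; retraining with fresh weights (or widening the hidden layer) usually fixes this.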

PRACTICAL 3

Aim: Implementing a deep neural network for performing a binary classification task.

Code and Output:
from sklearn.datasets import load_breast_cancer

from sklearn.model_selection import train_test_split

from keras.models import Sequential

from keras.layers import Dense

# Load the breast cancer dataset

data = load_breast_cancer()

X = data.data

y = data.target

# Split the dataset into training and testing sets

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Define the model architecture

model = Sequential()

model.add(Dense(16, input_dim=X_train.shape[1], activation='relu'))

model.add(Dense(8, activation='relu'))

model.add(Dense(1, activation='sigmoid'))

# Compile the model

model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

# Train the model

model.fit(X_train, y_train, epochs=50, batch_size=32, validation_split=0.2)

# Evaluate the model on the test set

loss, accuracy = model.evaluate(X_test, y_test)

print('Test accuracy:', accuracy)
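The breast-cancer features span very different numeric ranges, which can slow convergence. A minimal sketch (assuming the same split as above; the rebuilt model_s is illustrative, not part of the practical) that standardizes the inputs before training:

from sklearn.preprocessing import StandardScaler
from keras.models import Sequential
from keras.layers import Dense

# Fit the scaler on training data only to avoid information leakage
scaler = StandardScaler()
X_train_s = scaler.fit_transform(X_train)
X_test_s = scaler.transform(X_test)

# Fresh model with the same architecture as above
model_s = Sequential()
model_s.add(Dense(16, input_dim=X_train_s.shape[1], activation='relu'))
model_s.add(Dense(8, activation='relu'))
model_s.add(Dense(1, activation='sigmoid'))
model_s.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model_s.fit(X_train_s, y_train, epochs=50, batch_size=32, validation_split=0.2)
print('Test accuracy (scaled inputs):', model_s.evaluate(X_test_s, y_test)[1])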


PRACTICAL 4 a

Aim: Using a deep feed-forward network with two hidden layers for performing multiclass classification and predicting the class.

Code:

from sklearn.datasets import load_iris

from keras.models import Sequential

from keras.layers import Dense

from keras.utils import to_categorical

from sklearn.model_selection import train_test_split

# Load the iris dataset

data = load_iris()

X = data.data

y = data.target

# Convert y to one-hot encoded target variable

y = to_categorical(y)

# Split the dataset into training and testing sets

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Define the model architecture

model = Sequential()

model.add(Dense(16, input_dim=X_train.shape[1], activation='relu'))


model.add(Dense(8, activation='relu'))

model.add(Dense(y_train.shape[1], activation='softmax'))

# Compile the model

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

# Train the model

model.fit(X_train, y_train, epochs=50, batch_size=32, validation_split=0.2)

# Evaluate the model on the test set

loss, accuracy = model.evaluate(X_test, y_test)

print('Test accuracy:', accuracy)

# Make predictions on new data

new_data = [[4.9,6.6,3.3,2.7], [6.2, 2.8, 4.8, 1.8], [7.3, 2.9, 6.3, 1.8]]

new_data_predictions = model.predict(new_data)

print('Predictions for new data:', new_data_predictions)

Output:
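As an addendum, the predictions above are softmax probability vectors; a minimal sketch (reusing new_data_predictions and data from above) that converts them into hard class labels:

import numpy as np

# Pick the class with the highest softmax probability for each sample
pred_idx = np.argmax(new_data_predictions, axis=1)
print('Predicted classes:', [data.target_names[i] for i in pred_idx])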

PRACTICAL 4 b

Aim: Using a deep feed-forward network with two hidden layers for performing classification and predicting the probability of a class.

Code and Output:

import numpy as np
from keras.models import Sequential

from keras.layers import Dense

from keras.utils import to_categorical

from sklearn.datasets import load_iris

from sklearn.model_selection import train_test_split

# Load the iris dataset

data = load_iris()

X = data.data

y = data.target

# Convert y to one-hot encoded target variable

y = to_categorical(y)

# Split the dataset into training and testing sets

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=50)

# Define the model architecture

model = Sequential()

model.add(Dense(32, input_dim=X_train.shape[1], activation='relu'))

model.add(Dense(16, activation='relu'))

model.add(Dense(y_train.shape[1], activation='softmax'))

# Compile the model

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

# Model Summary

model.summary()
# Train the model

model.fit(X_train, y_train, epochs=100, batch_size=32, validation_split=0.2)

# Evaluate the model on the test set

loss, accuracy = model.evaluate(X_test, y_test)

print('Test accuracy:', accuracy)

#Make prediction on new data

new_data = [[9.3,4.6,3.5,2.2],[5.5,2.7,1.9,4.4],[7.3,2.9,6.8,2.9]]

new_data_predictions = model.predict(new_data)

y_pred_class = np.argmax(model.predict(X_test), axis=-1)

y_pred = model.predict(X_test)

y_test_class = np.argmax(y_test, axis=1)

#Classification Report

from sklearn.metrics import classification_report

print(classification_report(y_test_class,y_pred_class))

PRACTICAL 4 c

Aim: Using a deep feed-forward network with two hidden layers for performing linear regression and predicting values.

Code and Output:

import numpy as np
from keras.models import Sequential

from keras.layers import Dense

from sklearn.datasets import make_regression

from sklearn.model_selection import train_test_split

import pandas as pd

df = pd.read_csv("weatherAUS.csv")

df=df[pd.isnull(df['RainTomorrow'])==False]

df=df.fillna(df.mean(numeric_only=True))  # numeric_only avoids errors on string columns in newer pandas

# Create flags for RainToday and RainTomorrow; RainTomorrowFlag will be our target variable
df['RainTodayFlag']=df['RainToday'].apply(lambda x: 1 if x=='Yes' else 0)

df['RainTomorrowFlag']=df['RainTomorrow'].apply(lambda x: 1 if x=='Yes' else 0)

# Select data for modeling

X=df[['Humidity3pm']]

y=df['RainTomorrowFlag'].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

# Define the model architecture

model = Sequential()

model.add(Dense(64, input_dim=X_train.shape[1], activation='relu'))

model.add(Dense(32, activation='relu'))

model.add(Dense(1, activation='linear'))

#Model Summary

model.summary()
# Compile the model

model.compile(loss='mean_squared_error', optimizer='adam')

# Train the model

model.fit(X_train, y_train, epochs=50, batch_size=32, validation_split=0.2)

#Use model to make predictions

# Predict class labels on training data

pred_labels_tr = (model.predict(X_train) > 0.5).astype(int)

# Predict class labels on a test data

pred_labels_te = (model.predict(X_test) > 0.5).astype(int)

# Evaluate the model on the test set

loss = model.evaluate(X_test, y_test)

print('Test loss:', loss)

# Step 7 - Model Performance Summary

from sklearn.metrics import classification_report

print("")

print('-------------------- Model Summary --------------------')

model.summary() # print model summary

print("")

print('-------------------- Weights and Biases --------------------')


for layer in model.layers:
    print("Layer: ", layer.name)  # print layer name
    print("  --Kernels (Weights): ", layer.get_weights()[0])  # weights
    print("  --Biases: ", layer.get_weights()[1])  # biases

print("")

print('---------- Evaluation on Training Data ----------')

print(classification_report(y_train, pred_labels_tr))

print("")

print('---------- Evaluation on Test Data ----------')

print(classification_report(y_test, pred_labels_te))

print("")
PRACTICAL 5 a

Aim: Evaluating a feed-forward deep network for regression using KFold cross-validation.

Code and Output:

import numpy as np

import tensorflow as tf

from sklearn.datasets import make_regression

from sklearn.model_selection import KFold

# Generate a random regression dataset

X, y = make_regression(n_samples=1000, n_features=10, noise=0.1)

# Define the model architecture

model = tf.keras.Sequential([
    tf.keras.layers.Dense(32, activation='relu', input_shape=(X.shape[1],)),
    tf.keras.layers.Dense(16, activation='relu'),
    tf.keras.layers.Dense(8, activation='relu'),
    tf.keras.layers.Dense(1)
])

# Compile the model

model.compile(loss='mean_squared_error', optimizer='adam')

# Define the number of folds for KFold cross-validation

num_folds = 5

# Define the KFold cross-validation splitter

kfold = KFold(n_splits=num_folds, shuffle=True, random_state=42)

# Create an empty list to store the evaluation results for each fold

results = []

# Loop over each fold

for fold_idx, (train_idx, val_idx) in enumerate(kfold.split(X)):
    print('Processing fold', fold_idx+1)

    # Split the data into training and validation sets for this fold
    X_train, y_train = X[train_idx], y[train_idx]
    X_val, y_val = X[val_idx], y[val_idx]

    # Train the model on the training set for this fold
    model.fit(X_train, y_train, epochs=50, batch_size=32, verbose=0)

    # Evaluate the model on the validation set for this fold
    val_loss = model.evaluate(X_val, y_val, verbose=0)
    print('Validation loss:', val_loss)

    # Add the validation loss to the results list
    results.append(val_loss)

# Compute the mean and standard deviation of the validation losses

mean_val_loss = np.mean(results)

std_val_loss = np.std(results)

# Print the results

print('Mean validation loss:', mean_val_loss)

print('Standard deviation of validation loss:', std_val_loss)
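One caveat in the loop above: the model is built once and trained repeatedly, so each fold continues from the previous fold's weights, which makes later folds look better than they should. A minimal sketch (same X and y as above; build_model is a hypothetical helper) that rebuilds the model for every fold:

import numpy as np
import tensorflow as tf
from sklearn.model_selection import KFold

def build_model(n_features):
    # Fresh, untrained weights for every fold
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(32, activation='relu', input_shape=(n_features,)),
        tf.keras.layers.Dense(16, activation='relu'),
        tf.keras.layers.Dense(8, activation='relu'),
        tf.keras.layers.Dense(1)
    ])
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model

fold_losses = []
for train_idx, val_idx in KFold(n_splits=5, shuffle=True, random_state=42).split(X):
    m = build_model(X.shape[1])
    m.fit(X[train_idx], y[train_idx], epochs=50, batch_size=32, verbose=0)
    fold_losses.append(m.evaluate(X[val_idx], y[val_idx], verbose=0))
print('Mean validation loss (fresh model per fold):', np.mean(fold_losses))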

PRACTICAL 5 b

Aim: Evaluating a feed-forward deep network for multiclass classification using KFold cross-validation.

Code and Output:
import numpy as np

from keras.models import Sequential

from keras.layers import Dense

from keras.utils import to_categorical

from sklearn.datasets import make_blobs

from sklearn.preprocessing import MinMaxScaler

from sklearn.model_selection import KFold

# Generate a multiclass dataset

X, Y = make_blobs(n_samples=100, centers=3, n_features=2, random_state=1)

Y = to_categorical(Y) # Convert labels to one-hot encoding

# Scale the input features

scaler = MinMaxScaler()

X = scaler.fit_transform(X)

# Define the model creation function

def create_model():
    model = Sequential()
    model.add(Dense(4, input_dim=2, activation='relu'))
    model.add(Dense(4, activation='relu'))
    model.add(Dense(3, activation='softmax'))  # Output layer for 3 classes
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

# Implement KFold cross-validation

kf = KFold(n_splits=5, shuffle=True, random_state=1)

accuracies = []

for train_index, test_index in kf.split(X):
    X_train, X_test = X[train_index], X[test_index]
    Y_train, Y_test = Y[train_index], Y[test_index]
    model = create_model()
    model.fit(X_train, Y_train, epochs=500, verbose=0)
    _, accuracy = model.evaluate(X_test, Y_test, verbose=0)
    accuracies.append(accuracy)

print(f'Cross-validated accuracies: {accuracies}')

print(f'Mean accuracy: {np.mean(accuracies)}')

# Predict on new data

X_new, Y_real = make_blobs(n_samples=3, centers=3, n_features=2, random_state=1)

X_new = scaler.transform(X_new)

Y_pred = model.predict(X_new)

for i in range(len(X_new)):
    print(f"X={X_new[i]}, Predicted_probabilities={Y_pred[i]}, Predicted_class={np.argmax(Y_pred[i])}")

PRACTICAL 6

Aim: Implementing regularization to avoid overfitting in binary classification.

Code and Output:

from matplotlib import pyplot

from sklearn.datasets import make_moons

from keras.models import Sequential

from keras.layers import Dense

X,Y=make_moons(n_samples=100,noise=0.2,random_state=1)

n_train=30

trainX,testX=X[:n_train,:],X[n_train:]

trainY,testY=Y[:n_train],Y[n_train:]

model=Sequential()

model.add(Dense(500,input_dim=2,activation='relu'))

model.add(Dense(1,activation='sigmoid'))

model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])

history=model.fit(trainX,trainY,validation_data=(testX,testY),epochs=4000)

pyplot.plot(history.history['accuracy'],label='train')

pyplot.plot(history.history['val_accuracy'],label='test')

pyplot.legend()

pyplot.show()

# After applying L1/L2 regularizers (l1_l2)

from matplotlib import pyplot

from sklearn.datasets import make_moons

from keras.models import Sequential

from keras.layers import Dense

from keras.regularizers import l1_l2

X,Y=make_moons(n_samples=100,noise=0.2,random_state=1)

n_train=30

trainX,testX=X[:n_train,:],X[n_train:]

trainY,testY=Y[:n_train],Y[n_train:]

model=Sequential()

model.add(Dense(500,input_dim=2,activation='relu',kernel_regularizer=l1_l2(l1=0.001,l2=0.001)))
model.add(Dense(1,activation='sigmoid'))

model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])

history=model.fit(trainX,trainY,validation_data=(testX,testY),epochs=4000)

pyplot.plot(history.history['accuracy'],label='train')

pyplot.plot(history.history['val_accuracy'],label='test')

pyplot.legend()

pyplot.show()
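L1/L2 weight penalties are one way to regularize; dropout is a common alternative. A minimal sketch (not part of the original practical) of the same over-parameterized network with a Dropout layer, reusing trainX/trainY/testX/testY from above:

from keras.models import Sequential
from keras.layers import Dense, Dropout

# Randomly drop 50% of hidden activations during training to reduce overfitting
model=Sequential()
model.add(Dense(500,input_dim=2,activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1,activation='sigmoid'))
model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
history=model.fit(trainX,trainY,validation_data=(testX,testY),epochs=4000,verbose=0)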
PRACTICAL 7

Aim: Demonstrating a recurrent neural network that learns to perform sequence analysis for stock prices.

Code and Output:

import numpy as np

import matplotlib.pyplot as plt

import pandas as pd

from keras.models import Sequential

from keras.layers import Dense

from keras.layers import LSTM

from keras.layers import Dropout

from sklearn.preprocessing import MinMaxScaler

dataset_train=pd.read_csv('Google_Stock_price_Train.csv')

#print(dataset_train)

training_set=dataset_train.iloc[:,1:2].values

#print(training_set)

sc=MinMaxScaler(feature_range=(0,1))

training_set_scaled=sc.fit_transform(training_set)

#print(training_set_scaled)

X_train=[]

Y_train=[]

for i in range(60,1258):
    X_train.append(training_set_scaled[i-60:i,0])
    Y_train.append(training_set_scaled[i,0])

X_train,Y_train=np.array(X_train),np.array(Y_train)

print(X_train)

print('*********************************************')

print(Y_train)

X_train=np.reshape(X_train,(X_train.shape[0],X_train.shape[1],1))

print('**********************************************')
print(X_train)

regressor=Sequential()

regressor.add(LSTM(units=50,return_sequences=True,input_shape=(X_train.shape[1],1)))
regressor.add(Dropout(0.2))

regressor.add(LSTM(units=50,return_sequences=True))

regressor.add(Dropout(0.2))

regressor.add(LSTM(units=50,return_sequences=True))

regressor.add(Dropout(0.2))

regressor.add(LSTM(units=50))

regressor.add(Dropout(0.2))

regressor.add(Dense(units=1))

regressor.compile(optimizer='adam',loss='mean_squared_error')

regressor.fit(X_train,Y_train,epochs=100,batch_size=32)

dataset_test=pd.read_csv('Google_Stock_price_Test.csv')

real_stock_price=dataset_test.iloc[:,1:2].values

dataset_total=pd.concat((dataset_train['Open'],dataset_test['Open']),axis=0)
inputs=dataset_total[len(dataset_total)-len(dataset_test)-60:].values

inputs=inputs.reshape(-1,1)

inputs=sc.transform(inputs)

X_test=[]

for i in range(60,80):
    X_test.append(inputs[i-60:i,0])

X_test=np.array(X_test)

X_test=np.reshape(X_test,(X_test.shape[0],X_test.shape[1],1))

predicted_stock_price=regressor.predict(X_test)

predicted_stock_price=sc.inverse_transform(predicted_stock_price)

plt.plot(real_stock_price,color='red',label='real google stock price')


plt.plot(predicted_stock_price,color='blue',label='predicted stock price')

plt.xlabel('time')

plt.ylabel('google stock price')

plt.legend()

plt.show()
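The loop above builds the 60-step sliding windows by hand; an equivalent helper (a sketch, with make_windows being a hypothetical name) makes the lookback reusable:

import numpy as np

def make_windows(series, lookback=60):
    # series: scaled 2-D array of shape (n, 1); returns (samples, lookback, 1) inputs and next-step targets
    X, y = [], []
    for i in range(lookback, len(series)):
        X.append(series[i - lookback:i, 0])
        y.append(series[i, 0])
    return np.array(X).reshape(-1, lookback, 1), np.array(y)

X_train, Y_train = make_windows(training_set_scaled, lookback=60)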

PRACTICAL 8

Aim: Performing encoding and decoding of images using a deep autoencoder.

Code and Output:

import numpy as np

from keras.datasets import mnist

from keras.models import Sequential

from keras.layers import Dense, Flatten, Reshape

from keras.optimizers import Adam

import matplotlib.pyplot as plt

# Load MNIST dataset

(x_train, _), (x_test, _) = mnist.load_data()

# Normalize pixel values between 0 and 1

x_train = x_train.astype('float32') / 255.0

x_test = x_test.astype('float32') / 255.0

# Reshape images to a flattened vector

x_train = x_train.reshape((-1, 784))

x_test = x_test.reshape((-1, 784))

# Define the autoencoder model

model = Sequential()

# Encoder layers

model.add(Dense(256, activation='relu', input_dim=784))

model.add(Dense(128, activation='relu'))
model.add(Dense(64, activation='relu'))

# Decoder layers

model.add(Dense(128, activation='relu'))

model.add(Dense(256, activation='relu'))

model.add(Dense(784, activation='sigmoid'))

# Compile the model

model.compile(optimizer=Adam(), loss='binary_crossentropy')

# Train the model

model.fit(x_train, x_train, epochs=10, batch_size=32, validation_data=(x_test, x_test))

# Encode and decode images

# The full autoencoder maps 784 -> 784, so one predict() call encodes and decodes in a single pass
decoded_imgs = model.predict(x_test)

# Reshape decoded images to original shape

decoded_imgs = decoded_imgs.reshape((-1, 28, 28))

# Plot original and reconstructed images

n = 10 # Number of images to display

plt.figure(figsize=(20, 4))

for i in range(n):

# Original images

ax = plt.subplot(2, n, i + 1)

plt.imshow(x_test[i].reshape(28, 28), cmap='gray')

plt.title('Original')

ax.get_xaxis().set_visible(False)

ax.get_yaxis().set_visible(False)

# Reconstructed images

ax = plt.subplot(2, n, i + 1 + n)

plt.imshow(decoded_imgs[i], cmap='gray')

plt.title('Reconstructed')
ax.get_xaxis().set_visible(False)

ax.get_yaxis().set_visible(False)

plt.show()
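To inspect the 64-dimensional codes themselves, a separate encoder model is needed, since the Sequential autoencoder only exposes the full 784-to-784 mapping. A minimal sketch (assuming the model has been built and trained as above) that reuses the first three Dense layers:

from keras.models import Model

# Expose the bottleneck: input image -> output of the third (64-unit) layer
encoder = Model(inputs=model.inputs, outputs=model.layers[2].output)
encoded_imgs = encoder.predict(x_test)
print('Code shape:', encoded_imgs.shape)  # expected (10000, 64)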

PRACTICAL 9

Aim: Implementation of a convolutional neural network to predict numbers from number images.

Code and Output:

import numpy as np

from keras.models import Sequential

from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense

from keras.datasets import mnist

from keras.utils import to_categorical

# Load the MNIST dataset

(X_train, y_train), (X_test, y_test) = mnist.load_data()

# Reshape the input data to 4D tensors with a single channel

X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], X_train.shape[2], 1))
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], X_test.shape[2], 1))

# Normalize the input data

X_train = X_train.astype('float32') / 255.

X_test = X_test.astype('float32') / 255.

# Convert the labels to one-hot encoding

y_train = to_categorical(y_train, num_classes=10)

y_test = to_categorical(y_test, num_classes=10)

# Define the model architecture

model = Sequential()

model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))

model.add(MaxPooling2D((2, 2)))

model.add(Conv2D(64, (3, 3), activation='relu'))

model.add(MaxPooling2D((2, 2)))
model.add(Flatten())

model.add(Dense(64, activation='relu'))

model.add(Dense(10, activation='softmax'))

# Compile the model

model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# Train the model

model.fit(X_train, y_train, epochs=10, batch_size=32, validation_data=(X_test, y_test))

# Evaluate the model on test data

score = model.evaluate(X_test, y_test, verbose=0)

print('Test loss:', score[0])

print('Test accuracy:', score[1])
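A quick sanity check on a single image (reusing model, X_test and y_test from above): the predicted digit is the argmax of the softmax output:

import numpy as np

# Predict the digit in the first test image and compare with its true label
probs = model.predict(X_test[:1])
print('Predicted digit:', np.argmax(probs), '| True digit:', np.argmax(y_test[0]))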

PRACTICAL 10

Aim: Denoising of images using an autoencoder.

Code and Output:

import keras

from keras.datasets import mnist

from keras import layers

import numpy as np

from keras.callbacks import TensorBoard

import matplotlib.pyplot as plt

(X_train,_),(X_test,_)=mnist.load_data()

X_train=X_train.astype('float32')/255.

X_test=X_test.astype('float32')/255.

X_train=np.reshape(X_train,(len(X_train),28,28,1))

X_test=np.reshape(X_test,(len(X_test),28,28,1))

noise_factor=0.5
X_train_noisy=X_train+noise_factor*np.random.normal(loc=0.0,scale=1.0,size=X_train.shape)
X_test_noisy=X_test+noise_factor*np.random.normal(loc=0.0,scale=1.0,size=X_test.shape)

X_train_noisy=np.clip(X_train_noisy,0.,1.)

X_test_noisy=np.clip(X_test_noisy,0.,1.)

n=10

plt.figure(figsize=(20,2))

for i in range(1,n+1):
    ax=plt.subplot(1,n,i)
    plt.imshow(X_test_noisy[i].reshape(28,28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)

plt.show()

input_img=keras.Input(shape=(28,28,1))

x=layers.Conv2D(32,(3,3),activation='relu',padding='same')(input_img)

x=layers.MaxPooling2D((2,2),padding='same')(x)

x=layers.Conv2D(32,(3,3),activation='relu',padding='same')(x)

encoded=layers.MaxPooling2D((2,2),padding='same')(x)

x=layers.Conv2D(32,(3,3),activation='relu',padding='same')(encoded)

x=layers.UpSampling2D((2,2))(x)

x=layers.Conv2D(32,(3,3),activation='relu',padding='same')(x)

x=layers.UpSampling2D((2,2))(x)

decoded=layers.Conv2D(1,(3,3),activation='sigmoid',padding='same')(x)

autoencoder=keras.Model(input_img,decoded)

autoencoder.compile(optimizer='adam',loss='binary_crossentropy')

autoencoder.fit(X_train_noisy,X_train,
                epochs=3,
                batch_size=128,
                shuffle=True,
                validation_data=(X_test_noisy,X_test),
                callbacks=[TensorBoard(log_dir='/tmp/tb',histogram_freq=0,write_graph=False)])
predictions=autoencoder.predict(X_test_noisy)

m=10

plt.figure(figsize=(20,2))

for i in range(1,m+1):
    ax=plt.subplot(1,m,i)
    plt.imshow(predictions[i].reshape(28,28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)

plt.show()
