Deep Learning Practical
Exp-1) Implement a multilayer perceptron to learn the XOR function.
import numpy as np
inputs = np.array([[0,0],[0,1],[1,0],[1,1]])
expected_output = np.array([[0],[1],[1],[0]])
epochs = 100
lr = 0.1
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def sigmoid_derivative(x):
    return x * (1 - x)
# randomly initialise weights and biases for a 2-2-1 network
hidden_weights = np.random.uniform(size=(2, 2))
hidden_bias = np.random.uniform(size=(1, 2))
output_weights = np.random.uniform(size=(2, 1))
output_bias = np.random.uniform(size=(1, 1))
print(*hidden_weights)
print(*hidden_bias)
print(*output_weights)
print(*output_bias)
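The listing above only initialises the network; a minimal training loop to complete it, assuming standard full-batch backpropagation for this 2-2-1 sigmoid network (variable names follow the snippet above, and with only 100 epochs the predictions may not fully converge):

for _ in range(epochs):
    # forward pass
    hidden_layer_output = sigmoid(np.dot(inputs, hidden_weights) + hidden_bias)
    predicted_output = sigmoid(np.dot(hidden_layer_output, output_weights) + output_bias)
    # backward pass: gradient of the squared error through both sigmoid layers
    error = expected_output - predicted_output
    d_predicted_output = error * sigmoid_derivative(predicted_output)
    d_hidden_layer = d_predicted_output.dot(output_weights.T) * sigmoid_derivative(hidden_layer_output)
    # error is target minus prediction, so the updates are added
    output_weights += hidden_layer_output.T.dot(d_predicted_output) * lr
    output_bias += np.sum(d_predicted_output, axis=0, keepdims=True) * lr
    hidden_weights += inputs.T.dot(d_hidden_layer) * lr
    hidden_bias += np.sum(d_hidden_layer, axis=0, keepdims=True) * lr

print("Predictions after training:", *predicted_output)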
Exp-2) How to implement a CNN in Python?
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPool2D, Flatten, Dropout, Dense
# load the data
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# reshape the data to add a single channel dimension
X_train = X_train.reshape((X_train.shape[0], X_train.shape[1], X_train.shape[2], 1))
X_test = X_test.reshape((X_test.shape[0], X_test.shape[1], X_test.shape[2], 1))
print(X_train.shape)
print(X_test.shape)
# normalise pixel values to [0, 1]
X_train = X_train / 255
X_test = X_test / 255
# define the model
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(MaxPool2D(2, 2))
model.add(Flatten())
model.add(Dense(100, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train,y_train,epochs=10)
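The listing stops after training; a short evaluation step (an assumed addition, using Keras's built-in evaluate) reports accuracy on the held-out test set:

test_loss, test_acc = model.evaluate(X_test, y_test)
print("Test accuracy: {:.4f}".format(test_acc))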
Exp-3) Implement an autoencoder for any one of the following tasks:
a) Data Compression
import numpy as np
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model

encoding_dim = 32  # 32 floats -> compression of factor 24.5, assuming the input is 784 floats

input_img = Input(shape=(784,))
encoded = Dense(encoding_dim, activation='relu')(input_img)
decoded = Dense(784, activation='sigmoid')(encoded)
autoencoder = Model(input_img, decoded)

# stand-alone encoder and decoder models, reused below for prediction
encoder = Model(input_img, encoded)
encoded_input = Input(shape=(encoding_dim,))
decoder_layer = autoencoder.layers[-1]
decoder = Model(encoded_input, decoder_layer(encoded_input))

# configure our model to use a per-pixel binary crossentropy loss, and the Adadelta optimizer
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
Preparing the input data (MNIST Dataset)
from tensorflow.keras.datasets import mnist
(x_train, _), (x_test, _) = mnist.load_data()
# normalize all values between 0 and 1 and flatten the 28x28 images into vectors of size 784
x_train = x_train.astype('float32').reshape((len(x_train), 784)) / 255.
x_test = x_test.astype('float32').reshape((len(x_test), 784)) / 255.
print(x_train.shape)
print(x_test.shape)
autoencoder.fit(x_train, x_train,
                epochs=50,
                batch_size=256,
                shuffle=True,
                validation_data=(x_test, x_test))

# encode and decode some digits from the test set
encoded_imgs = encoder.predict(x_test)
decoded_imgs = decoder.predict(encoded_imgs)
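To inspect the compression quality visually, a small matplotlib loop (an assumed addition, not part of the original listing) can plot originals above their reconstructions:

import matplotlib.pyplot as plt

n = 10  # number of test digits to display
plt.figure(figsize=(20, 4))
for i in range(n):
    ax = plt.subplot(2, n, i + 1)       # original digit on top
    plt.imshow(x_test[i].reshape(28, 28), cmap='gray')
    ax.axis('off')
    ax = plt.subplot(2, n, i + 1 + n)   # reconstruction below
    plt.imshow(decoded_imgs[i].reshape(28, 28), cmap='gray')
    ax.axis('off')
plt.show()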
Exp-4) Implement an artificial neural network on GPUs
import numpy as np
import tensorflow as tf
from datetime import datetime

# Choose which device you want to test on: either 'cpu' or 'gpu'
devices = ['cpu', 'gpu']
shapes = [(50, 50), (500, 500), (1000, 1000), (10000, 10000)]

def compute_operations(device, shape):
    """Time a matrix multiplication on the selected device.

    Returns
    -------
    out : results of the operations as the time taken
    """
    with tf.device('/{}:0'.format(device)):
        random_matrix = tf.random.uniform(shape=shape)
        start = datetime.now()
        sum_operation = tf.reduce_sum(tf.matmul(random_matrix, tf.transpose(random_matrix)))
        _ = sum_operation.numpy()  # force execution under eager mode
        return datetime.now() - start

if __name__ == '__main__':
    for device in devices:
        for shape in shapes:
            time_taken = compute_operations(device, shape)
            # Print the result and also the time taken on the selected device
            print("Computation on shape:", shape, "using Device:", device,
                  "took: {:.2f}s".format(time_taken.seconds + time_taken.microseconds / 1e6))
            print("--" * 20)
Sample output:
Computation on shape: (…), using Device: 'cpu' took: 32.81s
Computation on shape: (50, 50), using Device: 'gpu' took: 0.03s
Computation on shape: (500, 500), using Device: 'gpu' took: 0.04s
Computation on shape: (1000, 1000), using Device: 'gpu' took: 0.04s
Computation on shape: (10000, 10000), using Device: 'gpu' took: 0.05s
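Before benchmarking, it helps to confirm that TensorFlow can actually see a GPU; a quick check (assuming TensorFlow 2.x) is:

import tensorflow as tf
print(tf.config.list_physical_devices('GPU'))  # an empty list means no GPU is visible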
Exp-5) Design an RNN or one of its variants (LSTM or GRU).
import numpy as np
import math
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM

# convert a series into (X, y) pairs of look_back inputs and next-step targets
def create_dataset(dataset, look_back=1):
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back - 1):
        a = dataset[i:(i + look_back), 0]
        dataX.append(a)
        dataY.append(dataset[i + look_back, 0])
    return np.array(dataX), np.array(dataY)

tf.random.set_seed(7)
# load a univariate time series, e.g. the monthly airline-passengers dataset
dataframe = pd.read_csv('airline-passengers.csv', usecols=[1], engine='python')
dataset = dataframe.values
dataset = dataset.astype('float32')
# normalise the series to [0, 1]
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)
# split into train and test sets
train_size = int(len(dataset) * 0.67)
train, test = dataset[:train_size, :], dataset[train_size:, :]
look_back = 1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
# reshape input to [samples, time steps, features]
trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
# fit a small LSTM network
model = Sequential()
model.add(LSTM(4, input_shape=(1, look_back)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trainX, trainY, epochs=100, batch_size=1, verbose=2)
# make predictions and undo the scaling
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])
# shift the predictions so they align with the original series when plotted
trainPredictPlot = np.empty_like(dataset)
trainPredictPlot[:, :] = np.nan
trainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict
testPredictPlot = np.empty_like(dataset)
testPredictPlot[:, :] = np.nan
testPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict
plt.plot(scaler.inverse_transform(dataset))
plt.plot(trainPredictPlot)
plt.plot(testPredictPlot)
plt.show()
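To quantify the fit, an RMSE score can follow the inverse transforms above; this sketch assumes scikit-learn's mean_squared_error alongside the math import from the listing:

from sklearn.metrics import mean_squared_error

trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:, 0]))
print('Train Score: %.2f RMSE' % trainScore)
testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:, 0]))
print('Test Score: %.2f RMSE' % testScore)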
Exp-6) Design and implement a CNN for Image Classification
import tensorflow as tf
# eager execution is enabled by default in TensorFlow 2.x, so no explicit call is needed
import random
import numpy as np

random.shuffle(training)  # 'training' is assumed to hold (image, label) pairs built in an earlier step
# split the shuffled pairs into a feature array X and a label list y
X = np.array([features for features, label in training])
y = [label for features, label in training]
X = X.astype('float32')
X /= 255
from tensorflow.keras.utils import to_categorical
Y = to_categorical(y, 4)
print(Y[100])
print(Y.shape)
batch_size = 16
nb_classes = 4
nb_epochs = 5
img_rows, img_columns = 200, 200
img_channel = 3
nb_filters = 32
nb_pool = 2
nb_conv = 3
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(32, (3,3), padding='same', activation=tf.nn.relu,
input_shape=(200, 200, 3)),
tf.keras.layers.MaxPooling2D((2, 2), strides=2),
tf.keras.layers.Conv2D(32, (3,3), padding='same', activation=tf.nn.relu),
tf.keras.layers.MaxPooling2D((2, 2), strides=2),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation=tf.nn.relu),
tf.keras.layers.Dense(4, activation=tf.nn.softmax)
])
# assuming a standard train/test split of the arrays prepared above
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)
# labels were one-hot encoded above, so use categorical (not sparse) crossentropy
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=batch_size, epochs=nb_epochs, verbose=1,
          validation_data=(X_test, y_test))
Step 9: Accuracy and Score of Model
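The scoring call itself is not shown in the listing; a minimal sketch, assuming the trained model and the held-out split created above:

score = model.evaluate(X_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])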
# scale pixels
def prep_pixels(train, test):
    # convert from integers to floats
    train_norm = train.astype('float32')
    test_norm = test.astype('float32')
    # normalize to range 0-1
    train_norm = train_norm / 255.0
    test_norm = test_norm / 255.0
    # return normalized images
    return train_norm, test_norm
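A usage example for prep_pixels, assuming raw integer image arrays named X_train and X_test:

X_train, X_test = prep_pixels(X_train, X_test)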