DL Practical
# Practical 1: basic TensorFlow tensor operations — matrix multiplication
# and eigendecomposition of a random matrix.
import tensorflow as tf

print("*********************************")
print("Matrix Multiplication")
# 3x2 and 2x3 operands; matmul yields a 3x3 result.
x = tf.constant([2, 3, 4, 4, 3, 1], shape=[3, 2])
print(x)
y = tf.constant([1, 2, 3, 5, 6, 9], shape=[2, 3])
print(y)
z = tf.matmul(x, y)
print(z)  # NOTE(review): original computed z but never printed it — added

# Random 2x2 matrix. tf.linalg.eigh assumes a Hermitian/symmetric input and
# reads only its lower triangle — a general random matrix is not symmetric.
e_matrix_A = tf.random.uniform([2, 2], minval=3, maxval=10,
                               dtype=tf.float32, name="matrixA")
print("MatrixA:\n{}\n\n".format(e_matrix_A))
eigen_values_A, eigen_vectors_A = tf.linalg.eigh(e_matrix_A)
print("EigenVectors:\n{}\n\nEigenValues:\n{}\n".format(eigen_vectors_A,
                                                       eigen_values_A))
PRACTICAL 2
# Practical 2: tiny MLP learning the XOR function with Keras.
import numpy as np
from keras.models import Sequential  # missing in the garbled source
from keras.layers import Dense

model = Sequential()
model.add(Dense(units=2, activation='relu', input_dim=2))
model.add(Dense(units=1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam',
              metrics=['accuracy'])
print(model.summary())
print(model.get_weights())

# XOR truth table: inputs and targets.
X = np.array([[0., 0.], [0., 1.], [1., 0.], [1., 1.]])
Y = np.array([0., 1., 1., 0.])
model.fit(X, Y, epochs=1000, batch_size=4)
print(model.get_weights())
print(model.predict(X, batch_size=4))
PRACTICAL 3
# Practical 3: single-hidden-layer binary classifier on the
# scikit-learn breast-cancer dataset.
from sklearn.datasets import load_breast_cancer  # missing in the garbled source
from keras.models import Sequential
from keras.layers import Dense

data = load_breast_cancer()
X = data.data   # (569, 30) feature matrix
y = data.target  # 0/1 labels

model = Sequential()
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam',
              metrics=['accuracy'])
# Train the model
# NOTE(review): the fit call was lost in extraction — hyperparameters
# assumed; confirm against the lab hand-out.
model.fit(X, y, epochs=100, batch_size=32)
PRACTICAL 4 a
Aim: Using a deep feed forward network with two hidden layers for
performing multiclass classification and predicting the class.
Code:
# Practical 4a: feed-forward network for multiclass classification on iris.
import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import to_categorical

data = load_iris()
X = data.data
y = data.target
y = to_categorical(y)  # one-hot targets for categorical_crossentropy
# NOTE(review): the split was lost in extraction — y_train was referenced
# below but never defined; parameters assumed, confirm against hand-out.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
                                                    random_state=42)

model = Sequential()
# NOTE(review): the hidden layers were lost in extraction — sizes assumed.
model.add(Dense(16, activation='relu', input_dim=4))
model.add(Dense(y_train.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])
# Train the model
model.fit(X_train, y_train, epochs=100, batch_size=8)

# Predict the class for unseen samples (4 features each, matching iris).
new_data = np.array([[4.9, 6.6, 3.3, 2.7],
                     [6.2, 2.8, 4.8, 1.8],
                     [7.3, 2.9, 6.3, 1.8]])
new_data_predictions = model.predict(new_data)
print(np.argmax(new_data_predictions, axis=1))
Output:
PRACTICAL 4 b
Aim: Using a deep feed forward network with two hidden layers
for performing classification and predicting the probability of
class.
# Practical 4b: feed-forward network predicting class probabilities on iris.
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import to_categorical
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report

data = load_iris()
X = data.data
y = data.target
y = to_categorical(y)
# NOTE(review): the split was lost in extraction — x_test/y_train/y_test
# were referenced below but never defined; parameters assumed.
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
                                                    random_state=42)

model = Sequential()
model.add(Dense(16, activation='relu'))
model.add(Dense(y_train.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])
# Model Summary
model.summary()
# Train the model (fit call lost in extraction — hyperparameters assumed).
model.fit(x_train, y_train, epochs=100, batch_size=8)

# Probability predictions for unseen samples.
# BUG in original: the list literal was never closed and the variable was
# spelled New_data while predict() received new_data.
new_data = np.array([[9.3, 4.6, 3.5, 2.2],
                     [5.5, 2.7, 1.9, 4.4],
                     [7.3, 2.9, 6.8, 2.9]])
new_data_predictions = model.predict(new_data)
print(new_data_predictions)

# Evaluate on the held-out set.
y_pred = model.predict(x_test)
y_pred_class = np.argmax(y_pred, axis=1)  # was used below but never computed
y_test_class = np.argmax(y_test, axis=1)
# Classification Report
print(classification_report(y_test_class, y_pred_class))
PRACTICAL 4 c
Aim: Using a deep feed forward network with two hidden layers
for performing linear regression and predicting values.
# Practical 4c: network on the Kaggle weatherAUS dataset.
# NOTE(review): the Aim says "linear regression", and the model has a
# linear output trained with MSE, yet the summary uses classification_report
# on the 0/1 rain flag — the lab apparently thresholds the linear score.
import numpy as np
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report

df = pd.read_csv("weatherAUS.csv")
# Drop rows with no label; impute numeric NaNs with column means.
df = df[pd.isnull(df['RainTomorrow']) == False]
df = df.fillna(df.mean())
df['RainTomorrowFlag'] = df['RainTomorrow'].apply(
    lambda x: 1 if x == 'Yes' else 0)

# Select data for modeling
X = df[['Humidity3pm']]
y = df['RainTomorrowFlag'].values
# NOTE(review): the split was lost in extraction — y_train/y_test were
# referenced below but never defined; parameters assumed.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
                                                    random_state=42)

model = Sequential()
model.add(Dense(32, activation='relu'))
model.add(Dense(1, activation='linear'))
# Model Summary
model.summary()
# Compile the model
model.compile(loss='mean_squared_error', optimizer='adam')
# Train the model (fit call lost in extraction — epochs assumed).
model.fit(X_train, y_train, epochs=10, batch_size=32)

##### Step 7 - Model Performance Summary
# pred_labels_tr/te were printed but never computed in the original:
# threshold the linear outputs at 0.5 to obtain 0/1 labels.
pred_labels_tr = (model.predict(X_train) > 0.5).astype(int)
pred_labels_te = (model.predict(X_test) > 0.5).astype(int)
print("")
print(classification_report(y_train, pred_labels_tr))
print("")
print(classification_report(y_test, pred_labels_te))
print("")
PRACTICAL 5 a
# Practical 5a: k-fold cross-validation of a small regression network.
import numpy as np
import tensorflow as tf
from sklearn.model_selection import KFold

# NOTE(review): the dataset definition and the fold loop were lost in
# extraction — the original appended an undefined `val_loss`. This
# reconstruction assumes numeric arrays X, y are already in scope.
num_folds = 5
# Create an empty list to store the evaluation results for each fold
results = []
kfold = KFold(n_splits=num_folds, shuffle=True, random_state=42)
for train_idx, val_idx in kfold.split(X):
    # Split the data into training and validation sets for this fold;
    # rebuild the model each fold so weights do not leak between folds.
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(8, activation='relu'),
        tf.keras.layers.Dense(1)
    ])
    model.compile(loss='mean_squared_error', optimizer='adam')
    model.fit(X[train_idx], y[train_idx], epochs=10, verbose=0)
    val_loss = model.evaluate(X[val_idx], y[val_idx], verbose=0)
    results.append(val_loss)

mean_val_loss = np.mean(results)
std_val_loss = np.std(results)
print(f"Mean validation loss: {mean_val_loss:.4f} (+/- {std_val_loss:.4f})")
PRACTICAL 5 b
# Practical 5b: classifier on min-max scaled inputs with per-run accuracy
# collection and probability predictions on new samples.
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from sklearn.preprocessing import MinMaxScaler

# NOTE(review): X, Y, X_new and parts of the loop were lost in extraction;
# this reconstruction keeps the surviving statements and assumes X, Y,
# X_new are already in scope.
scaler = MinMaxScaler()
X = scaler.fit_transform(X)


def create_model():
    """Build and compile a fresh one-hidden-layer softmax classifier."""
    model = Sequential()
    model.add(Dense(4, activation='relu'))
    # NOTE(review): the output layer was lost in extraction — size assumed
    # from the one-hot targets.
    model.add(Dense(Y.shape[1], activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    return model


accuracies = []
model = create_model()
# NOTE(review): fit/evaluate were lost in extraction — the original
# appended an undefined `accuracy`.
model.fit(X, Y, epochs=50, verbose=0)
_, accuracy = model.evaluate(X, Y, verbose=0)
accuracies.append(accuracy)

# Probability predictions and hard classes for the new samples.
Y_pred = model.predict(X_new)
for i in range(len(X_new)):
    print(f"X={X_new[i]}, Predicted_probabilities={Y_pred[i]}, "
          f"Predicted_class={np.argmax(Y_pred[i])}")
PRACTICAL 6
# Practical 6 (baseline): deliberately overfit a wide network on a tiny
# train split of the two-moons dataset and plot train vs. test accuracy.
from sklearn.datasets import make_moons
from keras.models import Sequential
from keras.layers import Dense
from matplotlib import pyplot

X, Y = make_moons(n_samples=100, noise=0.2, random_state=1)
# Only 30 training points against 500 hidden units — overfitting on purpose.
n_train = 30
trainX, testX = X[:n_train, :], X[n_train:]
trainY, testY = Y[:n_train], Y[n_train:]

model = Sequential()
model.add(Dense(500, input_dim=2, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam',
              metrics=['accuracy'])
history = model.fit(trainX, trainY, validation_data=(testX, testY),
                    epochs=4000)

pyplot.plot(history.history['accuracy'], label='train')
pyplot.plot(history.history['val_accuracy'], label='test')
pyplot.legend()
pyplot.show()
# Practical 6 (regularized): same setup with an L1+L2 weight penalty on the
# hidden layer to reduce the overfitting seen in the baseline run.
from sklearn.datasets import make_moons
from keras.models import Sequential
from keras.layers import Dense
from keras.regularizers import l1_l2
from matplotlib import pyplot

X, Y = make_moons(n_samples=100, noise=0.2, random_state=1)
n_train = 30
trainX, testX = X[:n_train, :], X[n_train:]
trainY, testY = Y[:n_train], Y[n_train:]

model = Sequential()
model.add(Dense(500, input_dim=2, activation='relu',
                kernel_regularizer=l1_l2(l1=0.001, l2=0.001)))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam',
              metrics=['accuracy'])
history = model.fit(trainX, trainY, validation_data=(testX, testY),
                    epochs=4000)

pyplot.plot(history.history['accuracy'], label='train')
pyplot.plot(history.history['val_accuracy'], label='test')
pyplot.legend()
pyplot.show()
PRACTICAL 7
# Practical 7: stacked-LSTM regressor forecasting the Google stock "Open"
# price from sliding 60-day windows.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import LSTM, Dense, Dropout

# ---- training data: scale prices and build 60-step windows -------------
dataset_train = pd.read_csv('Google_Stock_price_Train.csv')
training_set = dataset_train.iloc[:, 1:2].values  # "Open" column

sc = MinMaxScaler(feature_range=(0, 1))
training_set_scaled = sc.fit_transform(training_set)

X_train = []
Y_train = []
for i in range(60, 1258):  # 1258 rows assumed in the training CSV
    X_train.append(training_set_scaled[i - 60:i, 0])
    Y_train.append(training_set_scaled[i, 0])
X_train, Y_train = np.array(X_train), np.array(Y_train)
print(X_train)
print('*********************************************')
print(Y_train)

# LSTM expects (samples, timesteps, features).
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
print('**********************************************')
print(X_train)

# ---- model: four stacked LSTM layers with dropout ----------------------
regressor = Sequential()
regressor.add(LSTM(units=50, return_sequences=True,
                   input_shape=(X_train.shape[1], 1)))
regressor.add(Dropout(0.2))
regressor.add(LSTM(units=50, return_sequences=True))
regressor.add(Dropout(0.2))
regressor.add(LSTM(units=50, return_sequences=True))
regressor.add(Dropout(0.2))
regressor.add(LSTM(units=50))
regressor.add(Dropout(0.2))
regressor.add(Dense(units=1))
regressor.compile(optimizer='adam', loss='mean_squared_error')
regressor.fit(X_train, Y_train, epochs=100, batch_size=32)

# ---- test data: last 60 training days + the 20 test days ---------------
dataset_test = pd.read_csv('Google_Stock_price_Test.csv')
real_stock_price = dataset_test.iloc[:, 1:2].values
dataset_total = pd.concat((dataset_train['Open'], dataset_test['Open']),
                          axis=0)
inputs = dataset_total[len(dataset_total) - len(dataset_test) - 60:].values
inputs = inputs.reshape(-1, 1)
inputs = sc.transform(inputs)  # reuse the training scaler, do not refit

X_test = []
for i in range(60, 80):  # 20 test days
    X_test.append(inputs[i - 60:i, 0])
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))

predicted_stock_price = regressor.predict(X_test)
predicted_stock_price = sc.inverse_transform(predicted_stock_price)

# NOTE(review): the plot calls were lost in extraction — legend() needs
# labelled artists, so the standard real-vs-predicted curves are restored.
plt.plot(real_stock_price, color='red', label='Real Google Stock Price')
plt.plot(predicted_stock_price, color='blue',
         label='Predicted Google Stock Price')
plt.xlabel('time')
plt.legend()
plt.show()
PRACTICAL 8
# Practical 8: dense autoencoder on flattened 28x28 (784-dim) images.
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
from keras.datasets import mnist

# NOTE(review): data loading was lost in extraction — MNIST flattened to
# 784 floats in [0, 1] is assumed, matching the 784-unit output layer.
(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train.astype('float32').reshape(len(x_train), 784) / 255.
x_test = x_test.astype('float32').reshape(len(x_test), 784) / 255.

model = Sequential()
# Encoder layers
model.add(Dense(128, activation='relu'))
model.add(Dense(64, activation='relu'))
# Decoder layers
model.add(Dense(128, activation='relu'))
model.add(Dense(256, activation='relu'))
model.add(Dense(784, activation='sigmoid'))
model.compile(optimizer=Adam(), loss='binary_crossentropy')
# Train input -> input (fit call lost in extraction — settings assumed).
model.fit(x_train, x_train, epochs=5, batch_size=256,
          validation_data=(x_test, x_test))

# BUG in original: it ran model.predict twice, feeding the reconstructions
# back through the whole model. One pass already yields reconstructions.
decoded_imgs = model.predict(x_test)

n = 10  # number of digits to display (was undefined in the original)
plt.figure(figsize=(20, 4))
for i in range(n):
    # Original images (the imshow call was lost in extraction)
    ax = plt.subplot(2, n, i + 1)
    plt.imshow(x_test[i].reshape(28, 28), cmap='gray')
    plt.title('Original')
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    # Reconstructed images
    ax = plt.subplot(2, n, i + 1 + n)
    plt.imshow(decoded_imgs[i].reshape(28, 28), cmap='gray')
    plt.title('Reconstructed')
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()
PRACTICAL 9
# Practical 9: small CNN classifier with a 10-way softmax head.
import numpy as np
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense

model = Sequential()
# NOTE(review): the Conv2D layers were lost in extraction — a pooling-only
# stack is not a meaningful CNN. Standard 32/64-filter layers are restored
# here; confirm the exact architecture against the lab hand-out.
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
# Train the model (the fit call and dataset were lost in extraction).
PRACTICAL 10
# Practical 10: convolutional denoising autoencoder on MNIST — train on
# noisy inputs with clean targets, then visualise the denoised outputs.
import keras
import numpy as np
import matplotlib.pyplot as plt
from keras import layers
from keras.datasets import mnist
from keras.callbacks import TensorBoard

# Load MNIST, scale pixels to [0, 1], add the channel dimension.
(X_train, _), (X_test, _) = mnist.load_data()
X_train = X_train.astype('float32') / 255.
X_test = X_test.astype('float32') / 255.
X_train = np.reshape(X_train, (len(X_train), 28, 28, 1))
X_test = np.reshape(X_test, (len(X_test), 28, 28, 1))

# Corrupt with Gaussian noise, then clip back into the valid pixel range.
noise_factor = 0.5
X_train_noisy = X_train + noise_factor * np.random.normal(
    loc=0.0, scale=1.0, size=X_train.shape)
X_test_noisy = X_test + noise_factor * np.random.normal(
    loc=0.0, scale=1.0, size=X_test.shape)
X_train_noisy = np.clip(X_train_noisy, 0., 1.)
X_test_noisy = np.clip(X_test_noisy, 0., 1.)

# Show the first few noisy test inputs.
n = 10
plt.figure(figsize=(20, 2))
for i in range(1, n + 1):
    ax = plt.subplot(1, n, i)
    plt.imshow(X_test_noisy[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()

# Conv encoder (28 -> 14 -> 7) and mirrored upsampling decoder (7 -> 28).
input_img = keras.Input(shape=(28, 28, 1))
x = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(input_img)
x = layers.MaxPooling2D((2, 2), padding='same')(x)
x = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(x)
encoded = layers.MaxPooling2D((2, 2), padding='same')(x)
x = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(encoded)
x = layers.UpSampling2D((2, 2))(x)
x = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(x)
x = layers.UpSampling2D((2, 2))(x)
decoded = layers.Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)

autoencoder = keras.Model(input_img, decoded)
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
# Train noisy -> clean. BUG in original: log_dir was '/tmo/tb' (typo for /tmp).
autoencoder.fit(X_train_noisy, X_train,
                epochs=3,
                batch_size=128,
                shuffle=True,
                validation_data=(X_test_noisy, X_test),
                callbacks=[TensorBoard(log_dir='/tmp/tb',
                                       histogram_freq=0,
                                       write_graph=False)])

# Show the denoised reconstructions for the same test indices.
predictions = autoencoder.predict(X_test_noisy)
m = 10
plt.figure(figsize=(20, 2))
for i in range(1, m + 1):
    ax = plt.subplot(1, m, i)
    plt.imshow(predictions[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()