DLT Lab
1. Basic Feed-Forward Neural Network
Aim: To create and evaluate the performance of a basic feed-forward neural network by varying batch
size, hidden layers, and learning rates.
Algorithm:
Program:
Flower recognition
# Ignore the warnings
import warnings
warnings.filterwarnings('always')
warnings.filterwarnings('ignore')
# data handling
import numpy as np
import pandas as pd
# visualisation
import matplotlib.pyplot as plt
import seaborn as sns
# configure: sets matplotlib to inline and displays graphs below the corresponding cell.
sns.set(style='whitegrid', color_codes=True)
# model selection
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
# dl libraries
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten, Conv2D, MaxPooling2D
from keras.callbacks import ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam
import os
# specifically for manipulating zipped images and getting numpy arrays of pixel values of images.
import cv2
from PIL import Image
X = []
Z = []
IMG_SIZE = 150
FLOWER_DAISY_DIR = '../input/flowers/flowers/daisy'
FLOWER_SUNFLOWER_DIR = '../input/flowers/flowers/sunflower'
FLOWER_TULIP_DIR = '../input/flowers/flowers/tulip'
FLOWER_DANDI_DIR = '../input/flowers/flowers/dandelion'
FLOWER_ROSE_DIR = '../input/flowers/flowers/rose'
def assign_label(img, flower_type):
    return flower_type
def make_train_data(flower_type, DIR):
    # iterate over the directory, loading and resizing each image
    for img in os.listdir(DIR):
        label = assign_label(img, flower_type)
        path = os.path.join(DIR, img)
        img = cv2.imread(path, cv2.IMREAD_COLOR)
        img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
        X.append(np.array(img))
        Z.append(str(label))
# Build the CNN (the convolution layers preceding the first pooling steps are
# missing from the source listing; the filter sizes below are assumptions)
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(5, 5), padding='Same', activation='relu', input_shape=(150, 150, 3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='Same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Conv2D(filters=96, kernel_size=(3, 3), padding='Same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Conv2D(filters=96, kernel_size=(3, 3), padding='Same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dense(5, activation='softmax'))  # 5 flower classes
epochs = 50
red_lr = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.1)
# augmentation settings were omitted in the source; typical choices are assumed here
datagen = ImageDataGenerator(rotation_range=10, zoom_range=0.1, horizontal_flip=True)
datagen.fit(x_train)  # x_train/y_train come from label-encoding Z and splitting X (step omitted in the source)
model.compile(optimizer=Adam(learning_rate=0.001), loss='categorical_crossentropy', metrics=['accuracy'])
# training call reconstructed so that the History object plotted below exists
History = model.fit(datagen.flow(x_train, y_train, batch_size=128), epochs=epochs,
                    validation_data=(x_test, y_test), callbacks=[red_lr])
plt.plot(History.history['loss'])
plt.plot(History.history['val_loss'])
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epochs')
plt.legend(['train', 'test'])
plt.show()
plt.plot(History.history['acc'])
plt.plot(History.history['val_acc'])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epochs')
plt.legend(['train', 'test'])
plt.show()
# Convert prediction probabilities to class labels before building the confusion matrix
result = model.predict(x_test)
pred_classes = np.argmax(result, axis=1)
cm = confusion_matrix(np.argmax(y_test, axis=1), pred_classes)
sns.heatmap(cm, annot=True)
OUTPUT:
RESULT:
Successfully created and evaluated the performance of a basic feed-forward neural network by varying
batch size, hidden layers, and learning rates.
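Since the flower-recognition listing above is convolutional, the experiment's stated aim is easier to see in a minimal sketch where batch size, hidden-layer count, and learning rate are explicit variables. This is not part of the original program: the MNIST dataset, the layer widths, and the specific values swept below are all assumptions made for illustration.
import tensorflow as tf

def build_ffn(hidden_layers=2, units=128, learning_rate=0.001):
    # Stack a configurable number of fully connected hidden layers
    model = tf.keras.Sequential([tf.keras.layers.Flatten(input_shape=(28, 28))])
    for _ in range(hidden_layers):
        model.add(tf.keras.layers.Dense(units, activation='relu'))
    model.add(tf.keras.layers.Dense(10, activation='softmax'))
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
                  loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    return model

(x_train, y_train), _ = tf.keras.datasets.mnist.load_data()
x_train = x_train / 255.0
# Sweep batch size and learning rate; report validation accuracy for each setting
for batch_size in (32, 128):
    for lr in (1e-2, 1e-3):
        model = build_ffn(hidden_layers=2, learning_rate=lr)
        hist = model.fit(x_train, y_train, epochs=3, batch_size=batch_size,
                         validation_split=0.1, verbose=0)
        print(batch_size, lr, hist.history['val_accuracy'][-1])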
2. Solve XOR problem using Multi Layer Perceptron
Aim: To solve the XOR problem using a Multi-Layer Perceptron (MLP) and analyze its performance.
Algorithm:
Program:
import numpy as np
import tensorflow as tf
# XOR inputs and targets
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)
y = np.array([[0], [1], [1], [0]], dtype=float)
# one hidden layer is enough for XOR (a hidden size of 8 is an assumption)
model = tf.keras.Sequential([
    tf.keras.layers.Dense(8, activation='relu', input_shape=(2,)),
    tf.keras.layers.Dense(1, activation='sigmoid')])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(X, y, epochs=500, verbose=0)
_, accuracy = model.evaluate(X, y)
print(f'Accuracy: {accuracy*100:.2f}%')
# Making predictions
predictions = (model.predict(X) > 0.5).astype(int).ravel()
print(f'Predictions: {predictions}')
Output:
Accuracy: 75.00%
Predictions: [1, 1, 1, 0]
Result: Successfully implemented a Multi-Layer Perceptron to solve the XOR problem and evaluated its
performance.
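To make the role of the hidden layer concrete, here is a small companion sketch (not part of the original program): the same training setup with the hidden layer removed stalls, because XOR is not linearly separable.
import numpy as np
import tensorflow as tf

X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)
y = np.array([[0], [1], [1], [0]], dtype=float)

# Single-layer perceptron: no hidden layer, so it cannot represent XOR
linear = tf.keras.Sequential(
    [tf.keras.layers.Dense(1, activation='sigmoid', input_shape=(2,))])
linear.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
linear.fit(X, y, epochs=500, verbose=0)
# Accuracy typically plateaus well below 100% no matter how long it trains
print('single-layer accuracy:', linear.evaluate(X, y, verbose=0)[1])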
3. Implement Recurrent Neural Network (RNN)
Aim: To implement a Recurrent Neural Network (RNN) for processing sequential data.
Algorithm:
Program:
import os
import pandas as pd
import matplotlib.pyplot as plt
from numpy import hstack
from sklearn.preprocessing import MinMaxScaler
print(os.listdir("../input/bike-sharing-dataset"))
dataset = pd.read_csv('../input/bike-sharing-dataset/day.csv')
plt.figure(figsize=(15,10))
plt.plot(dataset['cnt'], color='blue')
plt.show()
temp = dataset[dataset.mnth == 10]
print(temp.cnt.mean())
temp.head()
# one-hot encode categorical columns and join them back (column choices assumed)
one_hot = pd.get_dummies(dataset['mnth'], prefix='mnth')
dataset = dataset.join(one_hot)
one_hot = pd.get_dummies(dataset['weekday'], prefix='weekday')
dataset = dataset.join(one_hot)
# scale the target count (MinMaxScaler assumed)
scaled = MinMaxScaler().fit_transform(dataset[['cnt']])
series = pd.DataFrame(scaled)
series.columns = ['cntscl']
dataset = dataset.join(series)
dataset.head()
number_of_test_data = 50
number_of_holdout_data = 50
# training size reconstructed: whatever is left after the test and holdout rows
number_of_training_data = len(dataset) - number_of_test_data - number_of_holdout_data
datatrain = dataset[:number_of_training_data]
# in_seq1..in_seq16 are the reshaped feature columns; their construction is omitted in the source
datatrain_feed = hstack((in_seq1, in_seq2, in_seq3, in_seq4, in_seq5, in_seq6, in_seq7, in_seq8, in_seq9,
                         in_seq10, in_seq11, in_seq12, in_seq13, in_seq14, in_seq15, in_seq16, out_seq_train))
datatest_feed = hstack((in_seq1, in_seq2, in_seq3, in_seq4, in_seq5, in_seq6, in_seq7, in_seq8, in_seq9,
                        in_seq10, in_seq11, in_seq12, in_seq13, in_seq14, in_seq15, in_seq16, out_seq_test))
datahold_feed = hstack((in_seq1, in_seq2, in_seq3, in_seq4, in_seq5, in_seq6, in_seq7, in_seq8, in_seq9,
                        in_seq10, in_seq11, in_seq12, in_seq13, in_seq14, in_seq15, in_seq16, out_seq_hold))
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import SimpleRNN, Dense
# the recurrent layer is omitted in the source listing; a small SimpleRNN is assumed
# (n_steps and n_features come from the omitted windowing step)
model = Sequential()
model.add(SimpleRNN(4, input_shape=(n_steps, n_features)))
model.add(Dense(1, activation='relu'))
model.summary()
OUTPUT
Total params: 93
Trainable params: 93
Non-trainable params: 0
Result: Successfully implemented a Recurrent Neural Network for processing sequential data and
evaluated its accuracy.
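Because the windowing step that builds in_seq1…in_seq16 is missing from the listing, the overall pattern is easier to follow in a self-contained sketch on two toy series. The series values, window length, and layer sizes below are illustrative assumptions, not part of the original program.
import numpy as np
from numpy import hstack
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import SimpleRNN, Dense

# Two toy input series and a target series (values are illustrative only)
in_seq1 = np.arange(10, 101, 10).reshape(-1, 1)
in_seq2 = np.arange(15, 106, 10).reshape(-1, 1)
out_seq = in_seq1 + in_seq2
data = hstack((in_seq1, in_seq2, out_seq))

# Slice the stacked series into overlapping windows of n_steps timesteps
def split_sequences(sequences, n_steps):
    X, y = [], []
    for i in range(len(sequences) - n_steps):
        X.append(sequences[i:i + n_steps, :-1])   # inputs: all but last column
        y.append(sequences[i + n_steps - 1, -1])  # target: last column at window end
    return np.array(X), np.array(y)

n_steps = 3
X, y = split_sequences(data, n_steps)

model = Sequential()
model.add(SimpleRNN(8, activation='relu', input_shape=(n_steps, 2)))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')
model.fit(X, y, epochs=200, verbose=0)
print(model.predict(X[-1:]))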
4.Implement Long Short-Term Memory (LSTM)
Aim: To implement and evaluate an LSTM network for processing sequential data.
Algorithm:
Program:
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, LSTM, Dense
import matplotlib.pyplot as plt
print('Loading data...')
# num_words: how many unique words to load into your training and testing dataset
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=20000)
x_train = sequence.pad_sequences(x_train, maxlen=80)
x_test = sequence.pad_sequences(x_test, maxlen=80)
model = Sequential()
model.add(Embedding(20000, 128))
model.add(LSTM(128))  # recurrent layer assumed; it is omitted in the source listing
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
train_history = model.fit(x_train, y_train,
                          batch_size=32,
                          epochs=15, verbose=2,
                          validation_data=(x_test, y_test))
def show_train_history(train_history, train, validation):
    plt.plot(train_history.history[train])
    plt.plot(train_history.history[validation])
    plt.title('Train History')
    plt.ylabel(train)
    plt.xlabel('Epoch')
    plt.legend(['train', 'validation'], loc='upper left')
    plt.show()
show_train_history(train_history, 'accuracy', 'val_accuracy')
score, acc = model.evaluate(x_test, y_test,
                            batch_size=32,
                            verbose=2)
print('Test accuracy:', acc)
OUTPUT
Result: Successfully implemented and evaluated an LSTM network for processing sequential data with
improved prediction accuracy.
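The num_words and maxlen arguments do the heavy lifting in the preprocessing above. A tiny standalone demonstration (toy word indices, not from the dataset) shows how pad_sequences left-pads short reviews with zeros and truncates long ones from the front:
from tensorflow.keras.preprocessing import sequence

seqs = [[17, 250, 3], [8, 92, 41, 7, 633]]
padded = sequence.pad_sequences(seqs, maxlen=4)
print(padded)
# [[  0  17 250   3]
#  [ 92  41   7 633]]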
5. Neural Network Models using TensorFlow and Keras
Aim: To implement a Convolutional Neural Network (CNN) using TensorFlow and Keras.
Algorithm:
Program:
import tensorflow as tf
from tensorflow.keras import layers, models
# Load and normalize the MNIST dataset
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 28, 28, 1) / 255.0
x_test = x_test.reshape(-1, 28, 28, 1) / 255.0
y_train = tf.keras.utils.to_categorical(y_train, 10)
y_test = tf.keras.utils.to_categorical(y_test, 10)
# Build the CNN (the convolution layer is missing from the source listing; one is assumed)
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# Train the model
model.fit(x_train, y_train, epochs=5, batch_size=64, validation_split=0.1)
test_loss, test_acc = model.evaluate(x_test, y_test, verbose=2)
print(f"Test accuracy: {test_acc}")
OUTPUT:
Result: Successfully implemented a Convolutional Neural Network using TensorFlow and Keras and
evaluated its performance on the dataset.
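One detail worth spelling out: the 'categorical_crossentropy' loss used above expects one-hot encoded labels, which is why the digit labels are converted with to_categorical. A short illustration with toy labels:
from tensorflow.keras.utils import to_categorical

# Integer class labels become one-hot rows, one column per class
print(to_categorical([0, 2, 1], num_classes=3))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]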
6. Implement Text Classifier using RNN
Aim: To build a text classifier using an RNN for sentiment analysis and evaluate its performance on test data.
Algorithm:
Program:
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
# Load the IMDB reviews dataset
dataset = tfds.load('imdb_reviews', as_supervised=True)
train_dataset, test_dataset = dataset['train'], dataset['test']
batch_size = 32
train_dataset = train_dataset.batch(batch_size)
test_dataset = test_dataset.batch(batch_size)
example, label = next(iter(train_dataset))
print('Text:\n', example.numpy()[0])
# Build the vocabulary, mapping each token to an integer
encoder = tf.keras.layers.TextVectorization(max_tokens=10000)
encoder.adapt(train_dataset.map(lambda text, label: text))
vocabulary = np.array(encoder.get_vocabulary())
original_text = example.numpy()[0]
encoded_text = encoder(original_text).numpy()
# Build the model
model = tf.keras.Sequential([
    encoder,
    tf.keras.layers.Embedding(len(encoder.get_vocabulary()), 64, mask_zero=True),
    tf.keras.layers.Bidirectional(
        tf.keras.layers.LSTM(64, return_sequences=True)),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32)),
    # dense head assumed; the source listing omits the final layers
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dense(1)
])
model.summary()
model.compile(
    loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
    optimizer=tf.keras.optimizers.Adam(),
    metrics=['accuracy']
)
history = model.fit(
    train_dataset, epochs=5,
    validation_data=test_dataset,
)
history_dict = history.history
acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
plt.figure(figsize=(8, 4))
plt.subplot(1, 2, 1)
plt.plot(acc)
plt.plot(val_acc)
plt.title('Training and Validation Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.subplot(1, 2, 2)
plt.plot(loss)
plt.plot(val_loss)
plt.title('Training and Validation Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.show()
# the sample review is truncated in the source; the string is closed as-is
sample_text = ('The movie by GeeksforGeeks was so good and the animation are so dope. I would')
predictions = model.predict(np.array([sample_text]))
print(*predictions[0])
# a positive logit means a positive review
if predictions[0] > 0:
    print('The review is positive')
else:
    print('The review is negative')
Result: Successfully built a text classifier using an RNN for sentiment analysis and evaluated its
performance on the test data.
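The TextVectorization layer is what lets the model accept raw strings directly. A minimal standalone demonstration (toy sentences assumed) shows the learned vocabulary and the integer encoding it produces:
import numpy as np
import tensorflow as tf

encoder = tf.keras.layers.TextVectorization(max_tokens=100)
# Build the vocabulary from a tiny corpus
encoder.adapt(['the movie was good', 'the movie was bad'])
print(np.array(encoder.get_vocabulary()))
# Frequent words get small indices; unseen words map to the [UNK] token (index 1)
print(encoder(['the movie was great']).numpy())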
7. Image Classifier using CNN
Aim: To implement an image classifier using a Convolutional Neural Network (CNN) and evaluate its performance.
Algorithm:
Program:
import keras
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import itertools
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from keras.optimizers import Adam
# the archive name 'faces.npz' and its keys are assumptions; the loading step is omitted in the source
data = np.load('faces.npz')
# Train images
x_train = data['trainX']
x_test = data['testX']
# Normalize every image
x_train = np.array(x_train, dtype='float32') / 255
x_test = np.array(x_test, dtype='float32') / 255
# Labels of images
y_train = data['trainY']
y_test = data['testY']
print('y_train: {}'.format(y_train))
im_rows = 112
im_cols = 92
batch_size = 512
im_shape = (im_rows, im_cols, 1)
# the convolution layers and the class count are assumptions; the source listing omits them
cnn_model = Sequential([
    Conv2D(filters=36, kernel_size=7, activation='relu', input_shape=im_shape),
    MaxPooling2D(pool_size=2),
    Conv2D(filters=54, kernel_size=5, activation='relu'),
    MaxPooling2D(pool_size=2),
    Flatten(),
    Dense(2024, activation='relu'),
    Dropout(0.5),
    Dense(1024, activation='relu'),
    Dropout(0.5),
    Dense(512, activation='relu'),
    Dropout(0.5),
    Dense(20, activation='softmax')
])
cnn_model.compile(
    loss='sparse_categorical_crossentropy',  # integer labels; 'categorical_crossentropy' would need one-hot labels
    optimizer=Adam(learning_rate=0.0001),
    metrics=['accuracy']
)
cnn_model.summary()
# x_valid/y_valid are assumed to come from a train/validation split omitted in the source
history = cnn_model.fit(
    np.array(x_train), np.array(y_train), batch_size=batch_size,
    epochs=100,  # epoch count assumed; not in the source
    verbose=2, validation_data=(np.array(x_valid), np.array(y_valid)),
)
scor = cnn_model.evaluate(np.array(x_test), np.array(y_test), verbose=0)
print('Test loss: {:.4f}'.format(scor[0]))
print(history.history.keys())
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.show()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.show()
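The commented alternative in the compile call above is worth a side-by-side look: sparse_categorical_crossentropy takes integer labels directly, while categorical_crossentropy needs them one-hot encoded. A small sketch of the equivalence, with toy labels and probabilities assumed:
import numpy as np
import tensorflow as tf

y_int = np.array([0, 2, 1])                       # integer labels
y_hot = tf.keras.utils.to_categorical(y_int, 3)   # the same labels, one-hot
probs = np.array([[0.7, 0.2, 0.1],
                  [0.1, 0.3, 0.6],
                  [0.2, 0.5, 0.3]])
sparse = tf.keras.losses.sparse_categorical_crossentropy(y_int, probs)
dense = tf.keras.losses.categorical_crossentropy(y_hot, probs)
# Both losses give the same per-sample values
print(sparse.numpy(), dense.numpy())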
OUTPUT
Result: Successfully implemented an image classifier using a CNN and evaluated its performance.
8. Object Detection and Classification for Traffic Analysis using CNN
Aim: To design a CNN-based system for object detection and classification in traffic analysis.
Algorithm:
Program:
import tensorflow as tf
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, Flatten, Dense, Dropout, Concatenate
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import numpy as np
# Constants (image size and class count are assumptions; the source omits them)
IMG_WIDTH, IMG_HEIGHT = 224, 224
NUM_CLASSES = 3
BATCH_SIZE = 16
EPOCHS = 20
# Image Data Generator for traffic images (assumes labels include both classification and bounding boxes)
train_data_dir = "path_to_training_data"
validation_data_dir = "path_to_validation_data"
train_datagen = ImageDataGenerator(
    rescale=1.0 / 255.0,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True
)
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(IMG_WIDTH, IMG_HEIGHT),
    batch_size=BATCH_SIZE,
    class_mode='categorical'
)
validation_generator = train_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(IMG_WIDTH, IMG_HEIGHT),
    batch_size=BATCH_SIZE,
    class_mode='categorical'
)
# Define a custom CNN model for both classification and bounding box regression
def create_model():
    # Input layer
    inputs = Input(shape=(IMG_WIDTH, IMG_HEIGHT, 3))
    # convolution blocks (filter sizes assumed; the source listing omits them)
    x = Conv2D(32, (3, 3), activation='relu', padding='same')(inputs)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Flatten()(x)
    x = Dense(256, activation='relu')(x)
    x = Dropout(0.5)(x)
    # two heads: class probabilities and bounding-box coordinates
    class_output = Dense(NUM_CLASSES, activation='softmax', name='class_output')(x)
    bbox_output = Dense(4, activation='linear', name='bbox_output')(x)
    return Model(inputs=inputs, outputs=[class_output, bbox_output])
model = create_model()
# Compile the model with two loss functions: one for classification and one for bounding box regression
model.compile(
    optimizer=Adam(learning_rate=0.0001),
    loss={
        'class_output': 'categorical_crossentropy',
        'bbox_output': 'mean_squared_error'
    },
    metrics={
        'class_output': 'accuracy',
        'bbox_output': 'mse'
    }
)
model.fit(
    train_generator,
    steps_per_epoch=train_generator.samples // BATCH_SIZE,
    epochs=EPOCHS,
    validation_data=validation_generator,
    validation_steps=validation_generator.samples // BATCH_SIZE
)
model.save('traffic_object_detection_model.h5')
OUTPUT:
Result: Successfully designed a CNN-based system for object detection and classification in traffic
analysis and evaluated its effectiveness.
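One subtlety in this design: a model compiled with per-output loss dictionaries expects each batch to carry a target for both named outputs. A minimal sketch with random stand-in images and boxes (shapes, sizes, and class count all assumed) shows the (image, {'class_output': ..., 'bbox_output': ...}) structure such a pipeline has to produce:
import numpy as np
import tensorflow as tf

# Dummy data: 8 images, a one-hot class per image, and a 4-number box per image
images = np.random.rand(8, 64, 64, 3).astype('float32')
classes = tf.keras.utils.to_categorical(np.random.randint(0, 3, 8), 3)
boxes = np.random.rand(8, 4).astype('float32')
ds = tf.data.Dataset.from_tensor_slices(
    (images, {'class_output': classes, 'bbox_output': boxes})).batch(4)

inputs = tf.keras.Input(shape=(64, 64, 3))
x = tf.keras.layers.GlobalAveragePooling2D()(inputs)
class_out = tf.keras.layers.Dense(3, activation='softmax', name='class_output')(x)
bbox_out = tf.keras.layers.Dense(4, name='bbox_output')(x)
model = tf.keras.Model(inputs, [class_out, bbox_out])
model.compile(optimizer='adam',
              loss={'class_output': 'categorical_crossentropy',
                    'bbox_output': 'mean_squared_error'})
model.fit(ds, epochs=1, verbose=0)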
9. Image Augmentation using Deep Restricted Boltzmann Machine (RBM)
Aim: To implement a deep Restricted Boltzmann Machine (RBM) for image augmentation and generation.
Algorithm:
Program:
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
class RBM:
    def __init__(self, visible_units, hidden_units, learning_rate, batch_size):
        self.visible_units = visible_units
        self.hidden_units = hidden_units
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        # small random weight initialization assumed
        self.weights = np.random.normal(0, 0.01, (visible_units, hidden_units))
        self.h_bias = np.zeros(hidden_units)
        self.v_bias = np.zeros(visible_units)
    def sigmoid(self, x):
        return 1 / (1 + np.exp(-x))
    def forward(self, visible):
        hidden_prob = self.sigmoid(np.dot(visible, self.weights) + self.h_bias)
        hidden_state = (np.random.rand(*hidden_prob.shape) < hidden_prob).astype(float)
        return hidden_prob, hidden_state
    def backward(self, hidden):
        visible_prob = self.sigmoid(np.dot(hidden, self.weights.T) + self.v_bias)
        return visible_prob
    def contrastive_divergence(self, batch_data):
        hidden_prob, hidden_state = self.forward(batch_data)
        positive_grad = np.dot(batch_data.T, hidden_prob)
        # Reconstruct the visible layer and run the forward pass again
        visible_reconstructed = self.backward(hidden_state)
        hidden_reconstructed_prob, _ = self.forward(visible_reconstructed)
        negative_grad = np.dot(visible_reconstructed.T, hidden_reconstructed_prob)
        self.weights += self.learning_rate * (positive_grad - negative_grad) / batch_data.shape[0]
        self.v_bias += self.learning_rate * np.mean(batch_data - visible_reconstructed, axis=0)
        self.h_bias += self.learning_rate * np.mean(hidden_prob - hidden_reconstructed_prob, axis=0)
    def train(self, data):
        for epoch in range(epochs):
            np.random.shuffle(data)
            for batch in range(0, data.shape[0], self.batch_size):
                batch_data = data[batch:batch + self.batch_size]
                self.contrastive_divergence(batch_data)
    def generate_image(self, hidden_sample):
        visible_reconstructed = self.backward(hidden_sample)
        return visible_reconstructed
# Load MNIST and flatten to 784-dimensional vectors (this preparation step is omitted in the source)
(x_train, _), _ = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 784).astype('float32') / 255
# Parameters
visible_units = x_train.shape[1]
hidden_units = 256
epochs = 10
learning_rate = 0.1
batch_size = 64
rbm = RBM(visible_units, hidden_units, learning_rate, batch_size)
rbm.train(x_train)
# Image augmentation by generating new samples from the hidden layer
hidden_samples = np.random.rand(1, hidden_units)  # random hidden activations assumed
generated_image = rbm.generate_image(hidden_samples)
plt.imshow(generated_image.reshape(28, 28), cmap="gray")
plt.show()
Output:
Result: Successfully implemented a deep RBM for image augmentation and generation, visualizing the
generated images effectively.
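The heart of the class above is the CD-1 weight update, delta_W = lr * (v.T @ h(v) - v'.T @ h(v')), where v' is the reconstruction of the visible layer. A single update step on one tiny visible vector makes the arithmetic visible; all sizes and values here are illustrative:
import numpy as np

rng = np.random.default_rng(0)
sigmoid = lambda x: 1 / (1 + np.exp(-x))

v = np.array([[1.0, 0.0, 1.0]])          # one visible vector, 3 units
W = rng.normal(0, 0.01, (3, 2))          # 3 visible x 2 hidden weights

h_prob = sigmoid(v @ W)                                      # forward pass
h_state = (rng.random(h_prob.shape) < h_prob).astype(float)  # sample hidden states
v_recon = sigmoid(h_state @ W.T)                             # reconstruction
h_recon = sigmoid(v_recon @ W)                               # second forward pass
delta_W = 0.1 * (v.T @ h_prob - v_recon.T @ h_recon)         # CD-1 update, lr = 0.1
print(delta_W)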
10. Sentiment Analysis using LSTM
Aim: To implement sentiment analysis using LSTM.
Algorithm:
Program:
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, LSTM, Dense
import matplotlib.pyplot as plt
print('Loading data...')
# num_words: how many unique words to load into your training and testing dataset
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=20000)
x_train = sequence.pad_sequences(x_train, maxlen=80)
x_test = sequence.pad_sequences(x_test, maxlen=80)
model = Sequential()
model.add(Embedding(20000, 128))
model.add(LSTM(128))  # recurrent layer assumed; it is omitted in the source listing
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
train_history = model.fit(x_train, y_train,
                          batch_size=32,
                          epochs=15, verbose=2,
                          validation_data=(x_test, y_test))
def show_train_history(train_history, train, validation):
    plt.plot(train_history.history[train])
    plt.plot(train_history.history[validation])
    plt.title('Train History')
    plt.ylabel(train)
    plt.xlabel('Epoch')
    plt.legend(['train', 'validation'], loc='upper left')
    plt.show()
show_train_history(train_history, 'accuracy', 'val_accuracy')
score, acc = model.evaluate(x_test, y_test,
                            batch_size=32,
                            verbose=2)
print('Test accuracy:', acc)
OUTPUT
Result: Successfully implemented sentiment analysis using LSTM and evaluated the model's
performance across epochs.
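To sanity-check what the model actually reads, the integer sequences can be decoded back to words with the dataset's word index; load_data reserves indices 0-2 for padding/start/unknown markers, hence the +3 offset in this short sketch:
from tensorflow.keras.datasets import imdb

word_index = imdb.get_word_index()
# load_data shifts every word id by 3 to make room for pad/start/unknown tokens
inv_index = {v + 3: k for k, v in word_index.items()}
(x_train, y_train), _ = imdb.load_data(num_words=20000)
review = ' '.join(inv_index.get(i, '?') for i in x_train[0])
print(review[:200], '| label:', y_train[0])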