Deep Record
AIM:
To write a python program to solve the XOR problem using a Multilayer Perceptron.
ALGORITHM:
Step – 1: Initialize the input data X and output labels Y for the XOR problem.
Step – 2: Initialize the weights and biases for each layer of the MLP.
Step – 3: Define the activation function for each neuron in the hidden layers.
Step – 4: Choose the loss function that measures the error between the predicted output
and the actual output.
Step – 5: Train the network by repeating forward propagation, backpropagation, and
weight updates until the loss converges.
Step – 6: The trained MLP can now be used to predict XOR or similar problems.
PROGRAM:
import numpy as np

def backward_prop(X, Y, params, cache):
    # Backpropagation (reconstructed): gradients for a 2-layer sigmoid MLP
    m = X.shape[1]
    A1, A2 = cache["A1"], cache["A2"]
    dZ2 = A2 - Y
    dW2 = dZ2 @ A1.T / m
    db2 = np.sum(dZ2, axis=1, keepdims=True) / m
    dZ1 = (params["W2"].T @ dZ2) * A1 * (1 - A1)
    dW1 = dZ1 @ X.T / m
    db1 = np.sum(dZ1, axis=1, keepdims=True) / m
    return {"dZ2": dZ2, "dW2": dW2, "db2": db2, "dZ1": dZ1, "dW1": dW1, "db1": db1}

def update_params(params, gradients, learning_rate):
    # Gradient-descent update for all weights and biases
    params["W1"] -= learning_rate * gradients["dW1"]
    params["W2"] -= learning_rate * gradients["dW2"]
    params["b1"] -= learning_rate * gradients["db1"]
    params["b2"] -= learning_rate * gradients["db2"]
    return params

X = np.array([[0, 0, 1, 1], [0, 1, 0, 1]])  # XOR input (one example per column)
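The listing above omits the forward pass, the parameter initialization, and the training loop; a minimal sketch consistent with the helpers above (the hidden size of 2, sigmoid activations, learning rate 0.5, and 10,000 iterations are assumptions):
def sigmoid(z):
    return 1 / (1 + np.exp(-z))

def init_params(n_x=2, n_h=2, n_y=1, seed=1):
    rng = np.random.default_rng(seed)
    return {"W1": rng.normal(size=(n_h, n_x)), "b1": np.zeros((n_h, 1)),
            "W2": rng.normal(size=(n_y, n_h)), "b2": np.zeros((n_y, 1))}

def forward_prop(X, params):
    Z1 = params["W1"] @ X + params["b1"]
    A1 = sigmoid(Z1)
    Z2 = params["W2"] @ A1 + params["b2"]
    A2 = sigmoid(Z2)
    return {"Z1": Z1, "A1": A1, "Z2": Z2, "A2": A2}

Y = np.array([[0, 1, 1, 0]])  # XOR labels
params = init_params()
for _ in range(10000):
    cache = forward_prop(X, params)
    gradients = backward_prop(X, Y, params, cache)
    params = update_params(params, gradients, learning_rate=0.5)

# After successful training this should print the XOR truth table: [[0. 1. 1. 0.]]
print(np.round(forward_prop(X, params)["A2"]))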
RESULT:
Thus, a python program to solve the XOR problem using a Multilayer Perceptron has been
executed successfully and the output got verified.
AIM:
To implement Character and Digit Recognition using ANN in python.
ALGORITHM:
1. Load the MNIST dataset of handwritten digits and normalize pixel values to [0, 1].
2. Build a Sequential ANN: a Flatten layer, a Dense ReLU layer, Dropout, and a softmax output layer.
3. Compile the model with the Adam optimizer and sparse categorical cross-entropy loss.
4. Train the model for five epochs and evaluate it on the test set.
5. Predict labels for test images and display them alongside the actual labels.
PROGRAM:
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.pyplot as plt
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(128, activation='relu'),
    keras.layers.Dropout(0.2),
    keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5)
test_loss, test_acc = model.evaluate(x_test, y_test)
print(f'Test accuracy: {test_acc}')
predictions = model.predict(x_test)
num_images_to_display = 5
plt.figure(figsize=(15, 5))
for i in range(num_images_to_display):
    plt.subplot(1, num_images_to_display, i + 1)
    plt.imshow(x_test[i], cmap='gray')
    plt.title(f"Predicted: {np.argmax(predictions[i])}\nActual: {y_test[i]}")
    plt.axis('off')
plt.show()
OUTPUT:
RESULT:
Thus, a python program to implement Character and Digit Recognition using ANN has been
executed successfully and the output got verified.
AIM:
To implement analysis of X-ray images using autoencoders in python.
ALGORITHM:
1. Load medical images, apply random noise transformations, and split the dataset into
training and testing sets.
2. Create data loaders and define an autoencoder model architecture for each noise type
(original, Gaussian, and salt-and-pepper).
3. Train the autoencoder models separately for each noise type using mean squared error loss
and Adam optimizer.
4. Visualize the training progress by plotting epoch-wise average loss for each noise type.
5. Generate image reconstructions using the trained models for both training and testing
datasets.
6. Calculate and display the Peak Signal-to-Noise Ratio (PSNR) values for the
reconstructions of each noise type.
PROGRAM:
import logging
import os
import shutil
import sys
import tempfile
import random
import numpy as np
import matplotlib.pyplot as plt
import torch
from skimage.util import random_noise
from tqdm import trange
from monai.apps import download_and_extract
from monai.config import print_config
from monai.data import CacheDataset, DataLoader
from monai.networks.nets import AutoEncoder
from monai.transforms import Compose, EnsureChannelFirstD, LoadImageD, ScaleIntensityD, EnsureTypeD, Lambda

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Data root (MONAI convention): use MONAI_DATA_DIRECTORY if set, else a temp dir
directory = os.environ.get("MONAI_DATA_DIRECTORY")
root_dir = tempfile.mkdtemp() if directory is None else directory
print(root_dir)
resource = "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/MedNIST.tar.gz"
md5 = "0bc7306e7427e00ad1c5526a6677552d"
compressed_file = os.path.join(root_dir, "MedNIST.tar.gz")
data_dir = os.path.join(root_dir, "MedNIST")
if not os.path.exists(data_dir):
download_and_extract(resource, compressed_file, root_dir, md5)
scan_type = "Hand"
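# Assumed data preparation (sketch): the listing below uses train_datadict,
# test_datadict, train_transforms, and NoiseLambda; the 80/20 split and the
# salt_vs_pepper=0.1 setting are assumptions in the spirit of the MONAI
# MedNIST autoencoder tutorial.
hands = [os.path.join(data_dir, scan_type, f) for f in os.listdir(os.path.join(data_dir, scan_type))]
random.shuffle(hands)
split = int(0.8 * len(hands))
train_datadict = [{"im": f} for f in hands[:split]]
test_datadict = [{"im": f} for f in hands[split:]]

# Keep the clean image and add Gaussian and salt-and-pepper noise variants
NoiseLambda = Lambda(
    lambda d: {
        "orig": d["im"],
        "gaus": torch.tensor(random_noise(d["im"], mode="gaussian"), dtype=torch.float32),
        "s&p": torch.tensor(random_noise(d["im"], mode="s&p", salt_vs_pepper=0.1), dtype=torch.float32),
    }
)

train_transforms = Compose(
    [
        LoadImageD(keys=["im"]),
        EnsureChannelFirstD(keys=["im"]),
        ScaleIntensityD(keys=["im"]),
        EnsureTypeD(keys=["im"]),
        NoiseLambda,
    ]
)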
test_transforms = Compose(
[
LoadImageD(keys=["im"]),
EnsureChannelFirstD(keys=["im"]),
ScaleIntensityD(keys=["im"]),
EnsureTypeD(keys=["im"]),
NoiseLambda,
]
)
batch_size = 300
num_workers = 10
train_ds = CacheDataset(train_datadict, train_transforms, num_workers=num_workers)
train_loader = DataLoader(train_ds, batch_size=batch_size, shuffle=True,
num_workers=num_workers)
test_ds = CacheDataset(test_datadict, test_transforms, num_workers=num_workers)
test_loader = DataLoader(test_ds, batch_size=batch_size, shuffle=True,
num_workers=num_workers)
def get_single_im(ds):
    loader = torch.utils.data.DataLoader(ds, batch_size=1, num_workers=10, shuffle=True)
    itera = iter(loader)
    return next(itera)
data = get_single_im(train_ds)
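# Assumed helper (sketch): plot_ims is not defined in the listing; a minimal
# version that shows the first image of each batch side by side.
def plot_ims(ims, titles=None):
    plt.figure(figsize=(4 * len(ims), 4))
    for i, im in enumerate(ims):
        plt.subplot(1, len(ims), i + 1)
        plt.imshow(im[0, 0], cmap="gray")  # first image in batch, first channel
        if titles is not None:
            plt.title(titles[i])
        plt.axis("off")
    plt.show()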
plot_ims([data["orig"], data["gaus"], data["s&p"]], titles=["orig", "Gaussian", "s&p"])
def train(dict_key_for_training, max_epochs=10, learning_rate=1e-3):
    model = AutoEncoder(
        spatial_dims=2,
        in_channels=1,
        out_channels=1,
        channels=(4, 8, 16, 32),
        strides=(2, 2, 2, 2),
    ).to(device)
    loss_function = torch.nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), learning_rate)
    epoch_loss_values = []
    t = trange(max_epochs, desc=f"{dict_key_for_training} -- epoch 0, avg loss: inf", leave=True)
    for epoch in t:
        model.train()
        epoch_loss = 0
        step = 0
        for batch_data in train_loader:
            step += 1
            inputs = batch_data[dict_key_for_training].to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = loss_function(outputs, batch_data["orig"].to(device))
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
        epoch_loss /= step
        epoch_loss_values.append(epoch_loss)
        t.set_description(f"{dict_key_for_training} -- epoch {epoch + 1}, average loss: {epoch_loss:.4f}")
    return model, epoch_loss_values
max_epochs = 50
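The training calls and the loss-curve plot that follow max_epochs are not shown above; a minimal sketch, assuming one model is trained per noise key as the train function expects:
models = []
epoch_losses = []
for key in ["orig", "gaus", "s&p"]:
    model, losses = train(key, max_epochs=max_epochs)
    models.append(model)
    epoch_losses.append(losses)

# Epoch-wise average loss for each noise type (algorithm step 4)
plt.figure()
for title, losses in zip(["orig", "Gaussian", "s&p"], epoch_losses):
    plt.plot(losses, label=title)
plt.xlabel("epoch")
plt.ylabel("average loss")
plt.legend()
plt.show()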
OUTPUT:
RESULT:
Thus, a python program to implement analysis of X-ray images using autoencoders has been
executed successfully and the output got verified.
AIM:
To write a python program to implement Speech recognition using NLP.
ALGORITHM:
Step–1: Initialize the necessary libraries and modules, including the speech_recognition
library, which provides speech recognition capabilities.
Step–2: Initialize the recognizer object, which will be used to capture and process audio input.
Step–3: Set up the audio source, such as the microphone, from which the recognizer will
capture audio.
Step–4: Adjust for ambient noise to improve audio recognition accuracy. This step is
essential for filtering out background noise.
Step–5: Capture audio input from the source for a specified duration or until silence is detected.
Step–6: Attempt to recognize the speech from the captured audio using a chosen speech
recognition engine, such as the Google Web Speech API.
PROGRAM:
import speech_recognition as sr
def speech_recognition_example():
    recognizer = sr.Recognizer()
    with sr.Microphone() as source:
        print("Say something:")
        recognizer.adjust_for_ambient_noise(source)
        audio = recognizer.listen(source, timeout=5)
    try:
        text = recognizer.recognize_google(audio)
        print(f"You said: {text}")
    except sr.UnknownValueError:
        print("Speech recognition could not understand audio")
    except sr.RequestError as e:
        print(f"Could not request results from the speech recognition service; {e}")

speech_recognition_example()
OUTPUT:
RESULT:
Thus, a python program to implement Speech recognition using NLP has been executed
successfully and the output got verified.
AIM:
To write a python program to design object detection and classification for traffic analysis
using CNN.
ALGORITHM:
1. Read the traffic video and initialize the Euclidean distance tracker.
2. Load the COCO class names and the pre-trained YOLOv3 network.
3. Run each frame through the network and keep detections of the required vehicle classes
above the confidence threshold.
4. Apply non-maximum suppression, draw the surviving boxes, and pass them to the tracker.
5. Count vehicles as their tracked centers cross the up, middle, and down reference lines.
PROGRAM:
import cv2
import csv
import math
import numpy as np
from tracker import EuclideanDistTracker
cap = cv2.VideoCapture("bridge.mp4")
input_size = 320
tracker = EuclideanDistTracker()
confThreshold = 0.2
nmsThreshold = 0.2
font_color = (0, 0, 255)
font_size = 0.5
font_thickness = 2
middle_line_position = 225
up_line_position = middle_line_position - 15
down_line_position = middle_line_position + 15
classesFile = "coco.names"
classNames = open(classesFile).read().strip().split('\n')
print(classNames)
print(len(classNames))
required_class_index = [2, 3, 5, 7]
detected_classNames = []
modelConfiguration = 'yolov3-320.cfg'
modelWeights = 'yolov3-320.weights'
net = cv2.dnn.readNetFromDarknet(modelConfiguration, modelWeights)
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
np.random.seed(42)
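# Assumed initialization (sketch): one random color per class and per-class
# counters used by the crossing logic below.
colors = np.random.randint(0, 255, size=(len(classNames), 3), dtype='uint8')
temp_up_list, temp_down_list = [], []
up_list = [0, 0, 0, 0]    # upward counts: car, motorbike, bus, truck
down_list = [0, 0, 0, 0]  # downward counts: car, motorbike, bus, truck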
def count_vehicle(box_id, img):
    x, y, w, h, id, index = box_id
    center = (x + w // 2, y + h // 2)
    ix, iy = center
    # Count a vehicle moving downwards once it crosses the lower line
    # (the upward-counting branch is elided in this listing)
    if iy > down_line_position:
        if id in temp_up_list:
            temp_up_list.remove(id)
            down_list[index] = down_list[index] + 1
    cv2.circle(img, center, 2, (0, 0, 255), -1)
def postProcess(outputs, img):
    global detected_classNames
    height, width = img.shape[:2]
    boxes = []
    classIds = []
    confidence_scores = []
    detection = []
    for output in outputs:
        for det in output:
            scores = det[5:]
            classId = np.argmax(scores)
            confidence = scores[classId]
            if classId in required_class_index:
                if confidence > confThreshold:
                    w, h = int(det[2] * width), int(det[3] * height)
                    x, y = int((det[0] * width) - w / 2), int((det[1] * height) - h / 2)
                    boxes.append([x, y, w, h])
                    classIds.append(classId)
                    confidence_scores.append(float(confidence))
    # Non-maximum suppression (reconstructed) keeps the best box per object
    indices = cv2.dnn.NMSBoxes(boxes, confidence_scores, confThreshold, nmsThreshold)
    for i in indices.flatten():
        x, y, w, h = boxes[i][0], boxes[i][1], boxes[i][2], boxes[i][3]
        color = [int(c) for c in colors[classIds[i]]]
        name = classNames[classIds[i]]
        detected_classNames.append(name)
        cv2.putText(img, f'{name.upper()} {int(confidence_scores[i] * 100)}%',
                    (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 1)
        cv2.rectangle(img, (x, y), (x + w, y + h), color, 1)
        detection.append([x, y, w, h, required_class_index.index(classIds[i])])
    boxes_ids = tracker.update(detection)
    for box_id in boxes_ids:
        count_vehicle(box_id, img)
def realTime():
    while True:
        success, img = cap.read()
        if not success:
            break  # end of video
        img = cv2.resize(img, (0, 0), None, 0.5, 0.5)
        ih, iw, channels = img.shape
        blob = cv2.dnn.blobFromImage(img, 1 / 255, (input_size, input_size), [0, 0, 0], 1, crop=False)
        net.setInput(blob)
        layersNames = net.getLayerNames()
        outputNames = [(layersNames[i - 1]) for i in net.getUnconnectedOutLayers()]
        outputs = net.forward(outputNames)
        postProcess(outputs, img)
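        # Assumed continuation (sketch): display the annotated frame and exit
        # on 'q'; the window name is an assumption
        cv2.imshow('Output', img)
        if cv2.waitKey(1) == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()

if __name__ == '__main__':
    realTime()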
OUTPUT:
RESULT:
Thus, a python program to design object detection and classification for traffic analysis using
CNN has been executed successfully and the output got verified.
AIM:
To write a python program to implement online fraud detection of share market data using
any one of the data analytics tools.
ALGORITHM:
1. Load the share market dataset using pandas.
2. Split the dataset into features (X) and the target variable (y).
3. Divide the data into training and testing sets with an 80-20 split.
4. Train an Isolation Forest model for anomaly detection with a contamination rate of 0.1.
5. Make predictions on the test data and convert the model's output to binary labels (0 for
normal, 1 for anomalous).
PROGRAM:
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import IsolationForest
from sklearn.metrics import classification_report
dataset = pd.read_csv('share_market_data.csv')
X = dataset.drop('Target', axis=1)
y = dataset['Target']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
model = IsolationForest(contamination=0.1, random_state=1)
model.fit(X_train)
y_pred = model.predict(X_test)
y_pred = [1 if x == -1 else 0 for x in y_pred]
print(classification_report(y_test, y_pred))
OUTPUT:
RESULT:
Thus, a python program to implement online fraud detection of share market data using any
one of the data analytics tools has been executed successfully and the output got verified.
AIM:
To write a python program to implement image augmentation using deep RBM.
ALGORITHM:
1. Import the necessary libraries, including the Keras ImageDataGenerator and skimage for
image processing.
2. Configure an ImageDataGenerator object with augmentation parameters, such as rotation,
shifting, shearing, zooming, horizontal flipping, and fill mode.
3. Load an input image ('msd_abd/msd/msd.jpg') and reshape it into the required format for
augmentation.
4. Iterate through the augmented image generation process, applying transformations to
create new images and saving them to the 'augmented' directory.
5. Load a set of images from a specified directory ('msd_abd/') using flow_from_directory
and apply the same augmentation transformations to generate additional augmented images,
saving them in the 'augmented' directory.
6. Store the augmented images in the 'augmented' directory, considering the specified batch
size and limiting the number of generated images to 32 in this case.
PROGRAM:
from keras.preprocessing.image import ImageDataGenerator
from skimage import io
import os
import numpy as np
from PIL import Image
datagen = ImageDataGenerator(
rotation_range=45,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='constant', cval=125)
x = io.imread('msd_abd/msd/msd.jpg')
x = x.reshape((1, ) + x.shape)
i = 0
for batch in datagen.flow(x, batch_size=16, save_to_dir='augmented', save_prefix='aug',
                          save_format='png'):
    i += 1
    if i > 20:
        break
SIZE = 128
dataset = []
image_directory = 'test_folder/'
my_images = os.listdir(image_directory)
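The remainder of the listing, which reads the folder's images and feeds them through the same generator, is sketched below; the accepted image extensions, the RGB conversion, and the 32-batch limit (from algorithm step 6) are assumptions:
# Load and resize every image in the folder, then augment the whole array
for image_name in my_images:
    if image_name.split('.')[-1].lower() in ('jpg', 'jpeg', 'png'):
        image = io.imread(image_directory + image_name)
        image = Image.fromarray(image, 'RGB')
        image = image.resize((SIZE, SIZE))
        dataset.append(np.array(image))

x = np.array(dataset)
i = 0
for batch in datagen.flow(x, batch_size=16, save_to_dir='augmented',
                          save_prefix='aug', save_format='png'):
    i += 1
    if i > 32:
        break  # stop after 32 generated batches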
OUTPUT:
RESULT:
Thus, a python program to implement image augmentation using deep RBM has been executed
successfully and the output got verified.
AIM:
To write a python program to implement Sentiment Analysis using LSTM.
ALGORITHM:
1. Load the Sentiment dataset with pandas, keep only the text and sentiment columns, and
drop Neutral rows.
2. Lower-case the text and remove non-alphanumeric characters with a regular expression.
3. Tokenize the text (top 2000 words) and pad all sequences to a common length.
4. Build a Sequential model with Embedding, SpatialDropout1D, LSTM, and softmax Dense layers.
5. Train the model on the training split and evaluate loss and accuracy on the test split.
6. Predict the sentiment of new text with the trained model.
PROGRAM:
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense, Embedding, LSTM, SpatialDropout1D
from sklearn.model_selection import train_test_split
from keras.utils.np_utils import to_categorical
import re
data = pd.read_csv('../input/Sentiment.csv')
data = data[['text', 'sentiment']]
data = data[data.sentiment != "Neutral"]
data['text'] = data['text'].apply(lambda x: x.lower())
data['text'] = data['text'].apply(lambda x: re.sub('[^a-zA-Z0-9\s]', '', x))
max_features = 2000
tokenizer = Tokenizer(num_words=max_features, split=' ')
tokenizer.fit_on_texts(data['text'].values)
X = tokenizer.texts_to_sequences(data['text'].values)
X = pad_sequences(X)
embed_dim = 128
lstm_out = 196
model = Sequential()
model.add(Embedding(max_features, embed_dim, input_length=X.shape[1]))
model.add(SpatialDropout1D(0.4))
model.add(LSTM(lstm_out, dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(2, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
Y = pd.get_dummies(data['sentiment']).values
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.33, random_state=42)
batch_size = 32
model.fit(X_train, Y_train, epochs=7, batch_size=batch_size, verbose=2)
validation_size = 1500
X_validate = X_test[-validation_size:]
Y_validate = Y_test[-validation_size:]
X_test = X_test[:-validation_size]
Y_test = Y_test[:-validation_size]
score, acc = model.evaluate(X_test, Y_test, verbose=2, batch_size=batch_size)
pos_cnt, neg_cnt, pos_correct, neg_correct = 0, 0, 0, 0
for x in range(len(X_validate)):
    result = model.predict(X_validate[x].reshape(1, X_test.shape[1]), batch_size=1,
                           verbose=2)[0]
    if np.argmax(result) == np.argmax(Y_validate[x]):
        if np.argmax(Y_validate[x]) == 0:
            neg_correct += 1
        else:
            pos_correct += 1
    if np.argmax(Y_validate[x]) == 0:
        neg_cnt += 1
    else:
        pos_cnt += 1
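# Assumed continuation (sketch): report per-class validation accuracy
print("pos_acc", pos_correct / pos_cnt * 100, "%")
print("neg_acc", neg_correct / neg_cnt * 100, "%")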
twt = ['Meetings: Because none of us is as dumb as all of us.']
twt = tokenizer.texts_to_sequences(twt)
twt = pad_sequences(twt, maxlen=28, dtype='int32', value=0)
sentiment = model.predict(twt, batch_size=1, verbose=2)[0]
if np.argmax(sentiment) == 0:
    print("negative")
elif np.argmax(sentiment) == 1:
    print("positive")
OUTPUT:
RESULT:
Thus, a python program to implement Sentiment Analysis using LSTM has been executed
successfully and the output got verified.
AIM:
To write a python program to implement Number plate recognition of traffic video analysis.
ALGORITHM:
1. Capture a video using OpenCV and read the first frame.
2. Convert the frame to grayscale, resize it, and apply Gaussian blurring and edge detection.
3. Find contours in the edge-detected image and identify the contour with the maximum area.
4. Extract the region containing the license plate from the original frame based on the
identified contour's bounding box.
5. Use Pytesseract to perform optical character recognition (OCR) on the license plate region
and extract the license plate number.
6. Print the extracted license plate number.
PROGRAM:
import hydra
import torch
import easyocr
import cv2
from ultralytics.yolo.engine.predictor import BasePredictor
from ultralytics.yolo.utils import DEFAULT_CONFIG, ROOT, ops
from ultralytics.yolo.utils.checks import check_imgsz
from ultralytics.yolo.utils.plotting import Annotator, colors, save_one_box

def getOCR(im, coors):
    # Crop the detected plate, convert it to grayscale, and read it with EasyOCR
    x, y, w, h = int(coors[0]), int(coors[1]), int(coors[2]), int(coors[3])
    im = im[y:h, x:w]
    conf = 0.2
    gray = cv2.cvtColor(im, cv2.COLOR_RGB2GRAY)
    results = reader.readtext(gray)
    ocr = ""
    # Each EasyOCR result is (bbox, text, confidence); keep a sufficiently
    # long, sufficiently confident reading
    for result in results:
        if len(results) == 1:
            ocr = result[1]
        if len(results) > 1 and len(result[1]) > 6 and result[2] > conf:
            ocr = result[1]
    return str(ocr)
class DetectionPredictor(BasePredictor):

    def get_annotator(self, img):
        return Annotator(img, line_width=self.args.line_thickness, example=str(self.model.names))
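    # Assumed sketch: the write_results override opens as in the Ultralytics
    # BasePredictor examples of this YOLOv8 version; the preprocess and
    # postprocess overrides are omitted in this listing.
    def write_results(self, idx, preds, batch):
        p, im, im0 = batch
        log_string = ""
        if len(im.shape) == 3:
            im = im[None]  # expand for batch dim
        self.seen += 1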
        im0 = im0.copy()
        if self.webcam:  # batch_size >= 1
            log_string += f'{idx}: '
            frame = self.dataset.count
        else:
            frame = getattr(self.dataset, 'frame', 0)
        self.data_path = p
        # save_path = str(self.save_dir / p.name)  # im.jpg
        if self.args.save_crop:
            imc = im0.copy()
            save_one_box(xyxy,
                         imc,
                         file=self.save_dir / 'crops' / self.model.model.names[c] /
                              f'{self.data_path.stem}.jpg',
                         BGR=True)
        return log_string
@hydra.main(version_base=None, config_path=str(DEFAULT_CONFIG.parent),
            config_name=DEFAULT_CONFIG.name)
def predict(cfg):
    cfg.model = cfg.model or "yolov8n.pt"
    cfg.imgsz = check_imgsz(cfg.imgsz, min_dim=2)  # check image size
    cfg.source = cfg.source if cfg.source is not None else ROOT / "assets"
    predictor = DetectionPredictor(cfg)
    predictor()

if __name__ == "__main__":
    reader = easyocr.Reader(['en'])
    predict()
OUTPUT:
RESULT:
Thus, a python program to implement Number plate recognition of traffic video analysis has
been executed successfully and the output got verified.