SCHOOL OF INFORMATION AND
COMMUNICATION TECHNOLOGY
DEEP LEARNING WITH PYTHON
AI 385
NAME - ANUSHKA SRIVASTAVA
ROLL NO - 215/UAI/031
BRANCH - B.TECH AI
SEM - 5th
INDEX
S.No Program Date Signature
1. To write a program to implement a Perceptron.
2. To write a program to implement AND and OR gates using a Perceptron.
3. To implement Crab Classification using Pattern Net.
4. To write a program to implement Wine Classification using Backpropagation.
5. To write a MATLAB script containing four functions: Addition, Subtraction, Multiplication, and Division.
6. To write a program to implement classification of linearly separable data with a Perceptron.
7. To study Long Short-Term Memory for Time Series Prediction.
8. To study Convolutional Neural Networks and Recurrent Neural Networks.
9. To study ImageNet, GoogLeNet, and ResNet Convolutional Neural Networks.
10. To study the use of Long Short-Term Memory / Gated Recurrent Units to predict stock prices based on historic data.
1. To write a program to implement a Perceptron.
import numpy as np

class Perceptron:
    def __init__(self, learning_rate=0.01, epochs=100):
        self.learning_rate = learning_rate
        self.epochs = epochs
        self.weights = None
        self.bias = None

    def train(self, X, y):
        # Initialize weights and bias
        self.weights = np.zeros(X.shape[1])
        self.bias = 0
        for epoch in range(self.epochs):
            for i in range(X.shape[0]):
                prediction = self.predict(X[i])
                error = y[i] - prediction
                # Update weights and bias
                self.weights += self.learning_rate * error * X[i]
                self.bias += self.learning_rate * error

    def predict(self, x):
        # Activation function (step function)
        activation = np.dot(self.weights, x) + self.bias
        return 1 if activation >= 0 else 0

# Example usage: binary OR problem
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([0, 1, 1, 1])

perceptron = Perceptron(learning_rate=0.01, epochs=100)
perceptron.train(X, y)

# Test predictions
test_data = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
for data_point in test_data:
    prediction = perceptron.predict(data_point)
    print(f"Input: {data_point}, Predicted Output: {prediction}")
Output:
2. To write a program to implement AND and OR gates using a Perceptron
import numpy as np

class Perceptron:
    def __init__(self, input_size, learning_rate=0.01, epochs=100):
        self.learning_rate = learning_rate
        self.epochs = epochs
        self.weights = np.zeros(input_size)
        self.bias = 0

    def train(self, X, y):
        for epoch in range(self.epochs):
            for i in range(X.shape[0]):
                prediction = self.predict(X[i])
                error = y[i] - prediction
                # Update weights and bias
                self.weights += self.learning_rate * error * X[i]
                self.bias += self.learning_rate * error

    def predict(self, x):
        # Activation function (step function)
        activation = np.dot(self.weights, x) + self.bias
        return 1 if activation >= 0 else 0

# AND gate
X_and = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y_and = np.array([0, 0, 0, 1])
perceptron_and = Perceptron(input_size=2, learning_rate=0.01, epochs=100)
perceptron_and.train(X_and, y_and)

# Test predictions for AND gate
test_data_and = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
print("AND Gate:")
for data_point in test_data_and:
    prediction = perceptron_and.predict(data_point)
    print(f"Input: {data_point}, Predicted Output: {prediction}")

# OR gate
X_or = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y_or = np.array([0, 1, 1, 1])
perceptron_or = Perceptron(input_size=2, learning_rate=0.01, epochs=100)
perceptron_or.train(X_or, y_or)

# Test predictions for OR gate
test_data_or = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
print("\nOR Gate:")
for data_point in test_data_or:
    prediction = perceptron_or.predict(data_point)
    print(f"Input: {data_point}, Predicted Output: {prediction}")
Output:
3. To implement Crab Classification using Pattern Net
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, classification_report
from sklearn.datasets import load_iris

# The Iris dataset stands in for the crab dataset for demonstration purposes;
# the same pipeline applies to any tabular feature/label data
iris = load_iris()
X = iris.data
y = iris.target

# Split the dataset into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Standardize features
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)

# Create and train a pattern recognition neural network (MLP)
pattern_net = MLPClassifier(hidden_layer_sizes=(10,), max_iter=1000, random_state=42)
pattern_net.fit(X_train_scaled, y_train)

# Make predictions on the test set
predictions = pattern_net.predict(X_test_scaled)

# Evaluate the model
accuracy = accuracy_score(y_test, predictions)
classification_rep = classification_report(y_test, predictions)
print(f"Accuracy: {accuracy:.2f}")
print("Classification Report:")
print(classification_rep)
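To classify a real crab dataset, only the loading step changes. The sketch below is hypothetical: the file name and column names ('FrontalLobe', 'RearWidth', 'Length', 'Width', 'Depth', 'Species') are illustrative assumptions that must match the actual file.

import pandas as pd

# Hypothetical loading step for a real crab dataset; the path and column
# names below are illustrative assumptions, not a fixed format
crabs = pd.read_csv('crabs.csv')
X = crabs[['FrontalLobe', 'RearWidth', 'Length', 'Width', 'Depth']].values
y = crabs['Species'].values
# The split/scale/fit/evaluate steps above then apply unchanged.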
Output:
4. To write a program to implement Wine Classification using Backpropagation
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, classification_report
from sklearn.datasets import load_wine

# Load the Wine dataset for demonstration purposes
wine = load_wine()
X = wine.data
y = wine.target

# Split the dataset into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Standardize features
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)

# Create and train a backpropagation neural network (MLP)
backprop_net = MLPClassifier(hidden_layer_sizes=(10,), max_iter=1000, random_state=42)
backprop_net.fit(X_train_scaled, y_train)

# Make predictions on the test set
predictions = backprop_net.predict(X_test_scaled)

# Evaluate the model
accuracy = accuracy_score(y_test, predictions)
classification_rep = classification_report(y_test, predictions)
print(f"Accuracy: {accuracy:.2f}")
print("Classification Report:")
print(classification_rep)
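When overall accuracy hides per-class errors, a confusion matrix is a useful addition; a minimal sketch reusing y_test and predictions from above:

from sklearn.metrics import confusion_matrix

# Rows are true classes, columns are predicted classes
print("Confusion Matrix:")
print(confusion_matrix(y_test, predictions))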
Output:
5. To write a MATLAB script containing four functions: Addition, Subtraction, Multiplication, and Division
% MATLAB script with four basic math functions
% Note: local functions in a script must appear at the end of the file
% (supported in MATLAB R2016b and later)

% Example usage
a = 5;
b = 3;

% Test Addition
add_result = add(a, b);
disp(['Addition: ' num2str(add_result)]);

% Test Subtraction
subtract_result = subtract(a, b);
disp(['Subtraction: ' num2str(subtract_result)]);

% Test Multiplication
multiply_result = multiply(a, b);
disp(['Multiplication: ' num2str(multiply_result)]);

% Test Division
divide_result = divide(a, b);
disp(['Division: ' num2str(divide_result)]);

% Function for Addition
function result = add(x, y)
    result = x + y;
end

% Function for Subtraction
function result = subtract(x, y)
    result = x - y;
end

% Function for Multiplication
function result = multiply(x, y)
    result = x * y;
end

% Function for Division (guards against division by zero)
function result = divide(x, y)
    if y ~= 0
        result = x / y;
    else
        error('Cannot divide by zero.');
    end
end
Output:
6. To write a program to implement classification of linearly separable data with a Perceptron
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.linear_model import Perceptron
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

# Generate linearly separable data
X, y = make_classification(n_samples=100, n_features=2, n_informative=2,
                           n_redundant=0, n_clusters_per_class=1, random_state=42)

# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Create and train a perceptron model
perceptron = Perceptron(max_iter=100, random_state=42)
perceptron.fit(X_train, y_train)

# Make predictions on the test set
predictions = perceptron.predict(X_test)

# Evaluate the model
accuracy = accuracy_score(y_test, predictions)
print(f"Accuracy: {accuracy:.2f}")

# Plot the decision boundary
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.01), np.arange(y_min, y_max, 0.01))
Z = perceptron.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, alpha=0.3)
plt.scatter(X[:, 0], X[:, 1], c=y, marker='o', edgecolors='k')
plt.title('Perceptron Classification of Linearly Separable Data')
plt.xlabel('Feature 1')
plt.ylabel('Feature 2')
plt.show()
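Beyond the filled contour, the exact learned hyperplane w·x + b = 0 can be drawn from the fitted coefficients. A minimal sketch, intended to be placed just before the final plt.show() above (it assumes the second weight is nonzero so the line can be solved for Feature 2):

# Exact decision line: w0*x1 + w1*x2 + b = 0  =>  x2 = -(w0*x1 + b) / w1
w = perceptron.coef_[0]
b = perceptron.intercept_[0]
xs = np.linspace(x_min, x_max, 100)
plt.plot(xs, -(w[0] * xs + b) / w[1], 'k--', label='Decision boundary')
plt.legend()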
Output:
7. To study Long Short-Term Memory for Time Series Prediction
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense
# Generate synthetic time series data
np.random.seed(42)
time = np.arange(0, 100, 1)
sinusoid = np.sin(0.2 * time) + 0.5 * np.random.randn(len(time))
# Visualize the time series
plt.plot(time, sinusoid)
plt.title('Synthetic Time Series Data')
plt.xlabel('Time')
plt.ylabel('Amplitude')
plt.show()
# Data preprocessing
scaler = MinMaxScaler(feature_range=(-1, 1))
sinusoid_scaled = scaler.fit_transform(sinusoid.reshape(-1, 1))
# Create sequences for LSTM training
def create_sequences(data, seq_length):
    X, y = [], []
    for i in range(len(data) - seq_length):
        seq_in = data[i:i + seq_length]
        seq_out = data[i + seq_length]
        X.append(seq_in)
        y.append(seq_out)
    return np.array(X), np.array(y)
sequence_length = 10
X, y = create_sequences(sinusoid_scaled, sequence_length)
# Split the data into training and testing sets
train_size = int(len(X) * 0.8)
X_train, X_test = X[:train_size], X[train_size:]
y_train, y_test = y[:train_size], y[train_size:]
# Build the LSTM model
model = Sequential()
model.add(LSTM(units=50, input_shape=(X_train.shape[1], 1)))
model.add(Dense(units=1))
model.compile(optimizer='adam', loss='mean_squared_error')
# Train the model
model.fit(X_train, y_train, epochs=100, batch_size=1, verbose=2)
# Make predictions
train_predict = model.predict(X_train)
test_predict = model.predict(X_test)
# Inverse transform the predictions to original scale
train_predict_inverse = scaler.inverse_transform(train_predict)
y_train_inverse = scaler.inverse_transform(y_train.reshape(-1, 1))
test_predict_inverse = scaler.inverse_transform(test_predict)
y_test_inverse = scaler.inverse_transform(y_test.reshape(-1, 1))
# Evaluate the model
train_rmse = np.sqrt(mean_squared_error(y_train_inverse, train_predict_inverse))
test_rmse = np.sqrt(mean_squared_error(y_test_inverse, test_predict_inverse))
print(f'Train RMSE: {train_rmse:.2f}')
print(f'Test RMSE: {test_rmse:.2f}')
# Visualize predictions (targets start sequence_length steps into the series,
# so the time axis is offset accordingly)
train_time = time[sequence_length:sequence_length + train_size]
test_time = time[sequence_length + train_size:]
plt.plot(train_time, y_train_inverse, label='Actual (Train)')
plt.plot(test_time, y_test_inverse, label='Actual (Test)')
plt.plot(train_time, train_predict_inverse, label='Predicted (Train)')
plt.plot(test_time, test_predict_inverse, label='Predicted (Test)')
plt.title('LSTM Time Series Prediction')
plt.xlabel('Time')
plt.ylabel('Amplitude')
plt.legend()
plt.show()
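The plots above only score one-step predictions on known inputs. To forecast beyond the end of the series, the model can be fed its own outputs recursively; a minimal sketch reusing the trained model and scaler (the 20-step horizon is illustrative, and errors compound as the forecast extends):

# Recursive multi-step forecast: feed each prediction back as the newest input
last_seq = sinusoid_scaled[-sequence_length:].reshape(1, sequence_length, 1)
forecast = []
for _ in range(20):  # illustrative horizon
    next_val = model.predict(last_seq, verbose=0)  # shape (1, 1)
    forecast.append(next_val[0, 0])
    # Slide the window: drop the oldest step, append the new prediction
    last_seq = np.concatenate([last_seq[:, 1:, :], next_val.reshape(1, 1, 1)], axis=1)
forecast_inverse = scaler.inverse_transform(np.array(forecast).reshape(-1, 1))
print(forecast_inverse.ravel())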
Output:
8. To study Convolutional Neural Networks and Recurrent Neural Networks
Convolutional Neural Network (CNN)
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.datasets import mnist

# Load and preprocess the MNIST dataset
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
train_images = train_images.reshape((60000, 28, 28, 1)).astype('float32') / 255
test_images = test_images.reshape((10000, 28, 28, 1)).astype('float32') / 255
train_labels = tf.keras.utils.to_categorical(train_labels)
test_labels = tf.keras.utils.to_categorical(test_labels)

# Build a simple CNN model
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))

# Compile and train the model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=5, batch_size=64,
          validation_data=(test_images, test_labels))
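After training, held-out performance is worth reporting explicitly; a one-line evaluation on the test split:

# Evaluate on the held-out test set
test_loss, test_acc = model.evaluate(test_images, test_labels)
print(f"Test accuracy: {test_acc:.4f}")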
Output:
Recurrent Neural Network (RNN)
import tensorflow as tf
from tensorflow.keras import layers, models
import numpy as np

# Generate synthetic sequential data
np.random.seed(42)
seq_length = 10
X = np.random.rand(1000, seq_length, 1)
y = np.sum(X, axis=1)

# Build a simple RNN model
model = models.Sequential()
model.add(layers.SimpleRNN(32, activation='relu', input_shape=(seq_length, 1)))
model.add(layers.Dense(1))

# Compile and train the model
model.compile(optimizer='adam', loss='mse')
model.fit(X, y, epochs=10, batch_size=32, validation_split=0.2)
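SimpleRNN layers struggle with longer dependencies because gradients vanish over many time steps; swapping in an LSTM is a one-line change. A sketch of the variant, using the same data and training call as above:

# LSTM variant: only the recurrent layer changes
model_lstm = models.Sequential()
model_lstm.add(layers.LSTM(32, input_shape=(seq_length, 1)))
model_lstm.add(layers.Dense(1))
model_lstm.compile(optimizer='adam', loss='mse')
model_lstm.fit(X, y, epochs=10, batch_size=32, validation_split=0.2)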
Output:
9. To study ImageNet, GoogLeNet, and ResNet Convolutional Neural Networks
ImageNet (Pre-trained VGG16 Model)
import tensorflow as tf
from tensorflow.keras.applications.vgg16 import VGG16, preprocess_input, decode_predictions
from tensorflow.keras.preprocessing import image
import numpy as np

# Load pre-trained VGG16 model
model_vgg16 = VGG16(weights='imagenet')

# Load and preprocess an image
img_path = 'c:\\Users\\anushka\\Downloads\\Labrador_Retriever_portrait.jpg'
img = image.load_img(img_path, target_size=(224, 224))
img_array = image.img_to_array(img)
img_array = np.expand_dims(img_array, axis=0)
img_array = preprocess_input(img_array)

# Make predictions
predictions = model_vgg16.predict(img_array)

# Decode and print the top-3 predicted classes
decoded_predictions = decode_predictions(predictions, top=3)[0]
for i, (imagenet_id, label, score) in enumerate(decoded_predictions):
    print(f"{i + 1}: {label} ({score:.2f})")
Output:
GoogLeNet (InceptionV3)
import tensorflow as tf
from tensorflow.keras.applications.inception_v3 import InceptionV3, preprocess_input, decode_predictions
from tensorflow.keras.preprocessing import image
import numpy as np

# Load pre-trained InceptionV3 model
model_inceptionv3 = InceptionV3(weights='imagenet')

# Load and preprocess an image
img_path = 'c:\\Users\\anushka\\Downloads\\Labrador_Retriever_portrait.jpg'
img = image.load_img(img_path, target_size=(299, 299))
img_array = image.img_to_array(img)
img_array = np.expand_dims(img_array, axis=0)
img_array = preprocess_input(img_array)

# Make predictions
predictions = model_inceptionv3.predict(img_array)

# Decode and print the top-3 predicted classes
decoded_predictions = decode_predictions(predictions, top=3)[0]
for i, (imagenet_id, label, score) in enumerate(decoded_predictions):
    print(f"{i + 1}: {label} ({score:.2f})")
Output:
ResNet (Pre-trained ResNet50 Model)
import tensorflow as tf
from tensorflow.keras.applications.resnet50 import ResNet50, preprocess_input, decode_predictions
from tensorflow.keras.preprocessing import image
import numpy as np

# Load pre-trained ResNet50 model
model_resnet50 = ResNet50(weights='imagenet')

# Load and preprocess an image
img_path = 'c:\\Users\\anushka\\Downloads\\Labrador_Retriever_portrait.jpg'
img = image.load_img(img_path, target_size=(224, 224))
img_array = image.img_to_array(img)
img_array = np.expand_dims(img_array, axis=0)
img_array = preprocess_input(img_array)

# Make predictions
predictions = model_resnet50.predict(img_array)

# Decode and print the top-3 predicted classes
decoded_predictions = decode_predictions(predictions, top=3)[0]
for i, (imagenet_id, label, score) in enumerate(decoded_predictions):
    print(f"{i + 1}: {label} ({score:.2f})")
Output:
10. To study the use of Long Short-Term Memory / Gated Recurrent Units to predict stock prices based on historic data
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense

# Load historical stock prices
# Replace 'your_stock_data.csv' with the path to your CSV file containing stock prices
data = pd.read_csv('c:\\Users\\arnav\\Downloads\\stock\\your_stock_data.csv')
data['Date'] = pd.to_datetime(data['Date'])
data.set_index('Date', inplace=True)

# Use the closing prices for prediction
df = data[['Close']]

# Normalize the data
scaler = MinMaxScaler(feature_range=(0, 1))
df_scaled = scaler.fit_transform(df)

# Create sequences for training the LSTM model
def create_sequences(data, seq_length):
    X, y = [], []
    for i in range(len(data) - seq_length):
        seq_in = data[i:i + seq_length, 0]
        seq_out = data[i + seq_length, 0]
        X.append(seq_in)
        y.append(seq_out)
    return np.array(X), np.array(y)

# Set sequence length and create sequences
sequence_length = 10
X, y = create_sequences(df_scaled, sequence_length)

# Split the data into training and testing sets
train_size = int(len(X) * 0.8)
X_train, X_test = X[:train_size], X[train_size:]
y_train, y_test = y[:train_size], y[train_size:]

# Reshape input to be [samples, time steps, features]
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))

# Build the LSTM model
model = Sequential()
model.add(LSTM(units=50, return_sequences=True, input_shape=(X_train.shape[1], 1)))
model.add(LSTM(units=50))
model.add(Dense(units=1))
model.compile(optimizer='adam', loss='mean_squared_error')

# Train the model
model.fit(X_train, y_train, epochs=10, batch_size=32)

# Make predictions on the test set
predictions = model.predict(X_test)

# Inverse transform the predictions to original scale
predictions_inverse = scaler.inverse_transform(predictions)
y_test_inverse = scaler.inverse_transform(y_test.reshape(-1, 1))

# Visualize the results
plt.figure(figsize=(16, 8))
plt.plot(data.index[train_size + sequence_length:], y_test_inverse, label='Actual Prices')
plt.plot(data.index[train_size + sequence_length:], predictions_inverse, label='Predicted Prices')
plt.title('Stock Price Prediction using LSTM')
plt.xlabel('Date')
plt.ylabel('Stock Price')
plt.legend()
plt.show()
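The title also covers Gated Recurrent Units. A GRU is a drop-in replacement for the LSTM here, with fewer parameters per unit; a sketch of the variant using the same data pipeline and training call as above:

from tensorflow.keras.layers import GRU

# GRU variant: swap the two LSTM layers for GRU layers
model_gru = Sequential()
model_gru.add(GRU(units=50, return_sequences=True, input_shape=(X_train.shape[1], 1)))
model_gru.add(GRU(units=50))
model_gru.add(Dense(units=1))
model_gru.compile(optimizer='adam', loss='mean_squared_error')
model_gru.fit(X_train, y_train, epochs=10, batch_size=32)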
Output: