deeplearning_labrecord

EXPERIMENT-1

Aim: Write an application to build a perceptron.


Dataset Description: The dataset is synthetically generated with two features for each data
point. The labels are assigned based on whether the sum of the two features is greater than 0.
• Features:
o Two random values (x1, x2) generated between -1 and 1.
• Labels:
o A label of 1 is assigned if the sum of the two features (x1 + x2) is greater than 0.
o Otherwise, the label is 0.
Attributes:
1. X (Features):
o Shape: (num_examples, 2)
o Values are randomly generated between -1 and 1.
2. y (Labels):
o Shape: (num_examples,)
o Binary labels: 0 or 1 depending on whether the sum of the features is greater than 0.
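For example, the point (0.4, -0.1) sums to 0.3 > 0 and is therefore labeled 1, while (-0.5, 0.2) sums to -0.3 and is labeled 0.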

Code:
import numpy as np
import matplotlib.pyplot as plt

# Generate dataset
def generate_dataset(num_examples):
    np.random.seed(42)
    X = np.random.rand(num_examples, 2) * 2 - 1  # Generate random points
    y = (X[:, 0] + X[:, 1] > 0).astype(int)      # Label: 1 if sum > 0, else 0
    return X, y

# Train perceptron
def perceptron_train(X, y, learning_rate, epochs):
    num_examples, num_features = X.shape
    weights = np.random.rand(num_features)  # Initialize weights
    bias = np.random.rand()                 # Initialize bias

    for epoch in range(epochs):
        for i in range(num_examples):
            prediction = np.dot(X[i], weights) + bias
            error = y[i] - (prediction > 0)
            if error != 0:
                weights += learning_rate * error * X[i]
                bias += learning_rate * error
    return weights, bias

# Predict function
def perceptron_predict(X, weights, bias):
    return (np.dot(X, weights) + bias) >= 0

# Plot decision boundary
def plot_decision_boundary(X, y, weights, bias, title):
    plt.figure(figsize=(8, 4))
    x_min, x_max = X[:, 0].min() - 0.1, X[:, 0].max() + 0.1
    y_min, y_max = X[:, 1].min() - 0.1, X[:, 1].max() + 0.1
    xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100), np.linspace(y_min, y_max, 100))
    Z = perceptron_predict(np.c_[xx.ravel(), yy.ravel()], weights, bias)
    Z = Z.reshape(xx.shape)
    plt.contourf(xx, yy, Z, alpha=0.8, cmap=plt.cm.Paired)
    plt.scatter(X[:, 0], X[:, 1], c=y, edgecolors='k', cmap=plt.cm.Paired)
    plt.title(title)
    plt.xlabel('Feature 1')
    plt.ylabel('Feature 2')
    plt.show()

# Run experiment with specific parameters
size = 40
lr = 0.3
epoch = 10

# Generating dataset
X, y = generate_dataset(size)

# Training perceptron
weights, bias = perceptron_train(X, y, lr, epoch)

# Plotting decision boundary
title = f'Dataset Size: {size}, Learning Rate: {lr}, Epochs: {epoch}'
plot_decision_boundary(X, y, weights, bias, title)

# Printing predictions
predictions = perceptron_predict(X, weights, bias)
inputs_predictions = [(X[i], int(predictions[i])) for i in range(size)]
inputs_predictions

Output:
[(array([-0.25091976, 0.90142861]), 1),
(array([0.46398788, 0.19731697]), 1),
(array([-0.68796272, -0.68801096]), 0),
(array([-0.88383278, 0.73235229]), 0),
(array([0.20223002, 0.41614516]), 1),
(array([-0.95883101, 0.9398197 ]), 0),
(array([ 0.66488528, -0.57532178]), 0),
(array([-0.63635007, -0.63319098]), 0),
(array([-0.39151551, 0.04951286]), 0),
(array([-0.13610996, -0.41754172]), 0),
(array([ 0.22370579, -0.72101228]), 0),
(array([-0.4157107 , -0.26727631]), 0),
(array([-0.08786003, 0.57035192]), 1),
(array([-0.60065244, 0.02846888]), 0),
(array([ 0.18482914, -0.90709917]), 0),
(array([ 0.2150897 , -0.65895175]), 0),
(array([-0.86989681, 0.89777107]), 0),
(array([0.93126407, 0.6167947 ]), 1),
(array([-0.39077246, -0.80465577]), 0),
(array([ 0.36846605, -0.11969501]), 1),
(array([-0.75592353, -0.00964618]), 0),
(array([-0.93122296, 0.8186408 ]), 0),
(array([-0.48244004, 0.32504457]), 0),
(array([-0.37657785, 0.04013604]), 0),
(array([ 0.09342056, -0.63029109]), 0),
(array([0.93916926, 0.55026565]), 1),
(array([0.87899788, 0.7896547 ]), 1),
(array([0.19579996, 0.84374847]), 1),
(array([-0.823015 , -0.60803428]), 0),
(array([-0.90954542, -0.34933934]), 0),
(array([-0.22264542, -0.45730194]), 0),
(array([ 0.65747502, -0.28649335]), 1),
(array([-0.43813098, 0.08539217]), 0),
(array([-0.71815155, 0.60439396]), 0),
(array([-0.85089871, 0.97377387]), 1),
(array([ 0.54448954, -0.60256864]), 0),
(array([-0.98895577, 0.63092286]), 0),
(array([0.41371469, 0.45801434]), 1),
(array([ 0.54254069, -0.8519107 ]), 0),
(array([-0.28306854, -0.76826188]), 0)]
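The list above pairs each input with the trained perceptron's prediction, not the ground-truth label. A quick sanity check, not part of the original record, is to compare the predictions against the generating rule using the variables already in scope:

# Sanity check (not in the original record): accuracy against true labels
accuracy = np.mean(perceptron_predict(X, weights, bias).astype(int) == y)
print(f"Training accuracy: {accuracy:.2%}")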
EXPERIMENT-2
Aim: Write an application to build AND, OR gates using perceptron.
Dataset Description: The inputs for the gates are binary combinations of two inputs:

• [0, 0], [0, 1], [1, 0], [1, 1].

These represent the possible input combinations for logic gates. The expected outputs depend
on the behavior of the logic gate (AND or OR).

Attributes:

1. Inputs:
o Binary combinations of two inputs (0 or 1).
o Shape: (4, 2).
2. Weights and Bias (checked by hand after this list):
o For the AND gate:
▪ Weights: [1, 1].
▪ Bias: -1.5.
o For the OR gate:
▪ Weights: [1, 1].
▪ Bias: -0.5.
3. Outputs:
o AND Gate:
▪ Produces 1 only when both inputs are 1.
o OR Gate:
▪ Produces 1 if at least one of the inputs is 1.
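These hand-set parameters can be verified directly: for the AND gate, only the input [1, 1] gives 1·1 + 1·1 - 1.5 = 0.5 > 0, and for the OR gate any input containing a 1 gives a weighted sum of at least 1 - 0.5 = 0.5 > 0. A minimal standalone check of this arithmetic (independent of the classes defined in the code below):

# Standalone arithmetic check of the hand-set weights and biases
for x1 in (0, 1):
    for x2 in (0, 1):
        s_and = 1*x1 + 1*x2 - 1.5  # positive only for (1, 1)
        s_or = 1*x1 + 1*x2 - 0.5   # positive unless (0, 0)
        print((x1, x2), int(s_and > 0), int(s_or > 0))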

Code:
import numpy as np
import matplotlib.pyplot as plt

# Perceptron for AND gate
class PerceptronAND:
    weights = np.array([1, 1])  # Weights for AND gate
    bias = -1.5                 # Bias for AND gate

    @classmethod
    def predict(cls, inputs):
        return 1 if np.dot(inputs, cls.weights) + cls.bias > 0 else 0

# Perceptron for OR gate
class PerceptronOR:
    weights = np.array([1, 1])  # Weights for OR gate
    bias = -0.5                 # Bias for OR gate

    @classmethod
    def predict(cls, inputs):
        return 1 if np.dot(inputs, cls.weights) + cls.bias > 0 else 0

# Test cases and printing results
def test_gates():
    # Test cases for AND gate
    print("AND Gate:")
    test_inputs_and = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    for inputs_and in test_inputs_and:
        prediction_and = PerceptronAND.predict(inputs_and)
        print(f"{inputs_and} -> {prediction_and}")

    # Test cases for OR gate
    print("\nOR Gate:")
    test_inputs_or = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    for inputs_or in test_inputs_or:
        prediction_or = PerceptronOR.predict(inputs_or)
        print(f"{inputs_or} -> {prediction_or}")

# Visualization function for decision boundaries
def plot_gate(gate, gate_name):
    plt.figure(figsize=(6, 6))
    plt.title(f"{gate_name} Gate")

    # Plotting the points
    plt.scatter([0, 1, 0, 1], [0, 0, 1, 1], color='blue', label='Output: 0')  # Points for 0 (blue)
    if gate_name == "AND":
        plt.scatter(1, 1, color='red', label='Output: 1')  # Point for 1 (red)
    else:
        plt.scatter([0, 1, 1], [1, 0, 1], color='red', label='Output: 1')  # Points for 1 (red)

    # Creating the decision boundary
    x_vals = np.linspace(-1, 2, 100)
    y_vals = (-gate.weights[0] * x_vals - gate.bias) / gate.weights[1]
    plt.plot(x_vals, y_vals, 'g--', label='Decision Boundary')

    plt.xlim(-0.5, 1.5)
    plt.ylim(-0.5, 1.5)
    plt.xlabel('Input 1')
    plt.ylabel('Input 2')
    plt.legend()
    plt.grid()
    plt.show()

# Running tests and plotting
test_gates()
plot_gate(PerceptronAND, "AND")
plot_gate(PerceptronOR, "OR")

Output:
AND Gate:
[0 0] -> 0
[0 1] -> 0
[1 0] -> 0
[1 1] -> 1

OR Gate:
[0 0] -> 0
[0 1] -> 1
[1 0] -> 1
[1 1] -> 1
EXPERIMENT-3
Aim: Write an application to implement a simple neural network.
Dataset Description:

The dataset is designed to solve the XOR problem, which is a non-linear classification
problem.

• Inputs: Two binary inputs (0 or 1).
• Outputs: A single binary output where the result is 1 if the inputs are different and 0 if they are the same.

Attributes:

1. Input Features:
o Two binary values representing the inputs to the XOR gate.
2. Target Output:
o A single binary value representing the XOR output.
3. Neural Network Architecture:
o Input Layer: 2 neurons (corresponding to the two inputs).
o Hidden Layer: 4 neurons (configurable).
o Output Layer: 1 neuron (for the binary XOR output).

Code:

import numpy as np
import matplotlib.pyplot as plt

# Sigmoid activation function and its derivative
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def sigmoid_derivative(x):
    # Note: x is assumed to already be a sigmoid output,
    # so the derivative simplifies to x * (1 - x)
    return x * (1 - x)

# Neural network training function
def train_neural_network(X, Y, hidden_neurons=4, epochs=10000, learning_rate=0.1):
    input_neurons = X.shape[1]
    output_neurons = Y.shape[1]

    # Initialize weights and biases using Xavier/Glorot initialization
    hidden_weights = np.random.randn(input_neurons, hidden_neurons) * np.sqrt(2 / (input_neurons + hidden_neurons))
    hidden_bias = np.zeros((1, hidden_neurons))
    output_weights = np.random.randn(hidden_neurons, output_neurons) * np.sqrt(2 / (hidden_neurons + output_neurons))
    output_bias = np.zeros((1, output_neurons))

    losses = []  # To track the loss during training

    for epoch in range(epochs):
        # Forward propagation
        hidden_layer_activation = np.dot(X, hidden_weights) + hidden_bias
        hidden_layer_output = sigmoid(hidden_layer_activation)
        output_layer_activation = np.dot(hidden_layer_output, output_weights) + output_bias
        predicted_output = sigmoid(output_layer_activation)

        # Calculate loss and append to losses list for visualization
        loss = np.mean(np.square(Y - predicted_output))
        losses.append(loss)

        # Backpropagation
        error = Y - predicted_output
        d_predicted_output = error * sigmoid_derivative(predicted_output)
        error_hidden_layer = d_predicted_output.dot(output_weights.T)
        d_hidden_layer = error_hidden_layer * sigmoid_derivative(hidden_layer_output)

        # Updating weights and biases
        output_weights += hidden_layer_output.T.dot(d_predicted_output) * learning_rate
        output_bias += np.sum(d_predicted_output, axis=0, keepdims=True) * learning_rate
        hidden_weights += X.T.dot(d_hidden_layer) * learning_rate
        hidden_bias += np.sum(d_hidden_layer, axis=0, keepdims=True) * learning_rate

    return hidden_weights, hidden_bias, output_weights, output_bias, losses

# XOR input and output
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
Y = np.array([[0], [1], [1], [0]])

# Training the model
hidden_weights, hidden_bias, output_weights, output_bias, losses = train_neural_network(X, Y)

# Predictions
hidden_layer_activation = np.dot(X, hidden_weights) + hidden_bias
hidden_layer_output = sigmoid(hidden_layer_activation)
output_layer_activation = np.dot(hidden_layer_output, output_weights) + output_bias
predicted_output = sigmoid(output_layer_activation)

print("Predictions:")
for i in range(len(X)):
    print(f"Input: {X[i]}, Predicted Output: {predicted_output[i][0]:.4f}, Actual Output: {Y[i][0]}")

# Plotting the decision boundary
plt.figure(figsize=(8, 6))
plt.scatter(X[:, 0], X[:, 1], c=Y.flatten(), cmap='coolwarm', s=100, label='XOR Data')
plt.title('XOR Data and Decision Boundary')
plt.xlabel('Input 1')
plt.ylabel('Input 2')

x_min, x_max = X[:, 0].min() - 0.1, X[:, 0].max() + 0.1
y_min, y_max = X[:, 1].min() - 0.1, X[:, 1].max() + 0.1
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100), np.linspace(y_min, y_max, 100))
grid_inputs = np.c_[xx.ravel(), yy.ravel()]

# Forward propagation to get predictions for the grid
grid_hidden = sigmoid(np.dot(grid_inputs, hidden_weights) + hidden_bias)
grid_output = sigmoid(np.dot(grid_hidden, output_weights) + output_bias)
predictions_grid = grid_output.reshape(xx.shape)

# Plot decision boundary
plt.contourf(xx, yy, predictions_grid, cmap='coolwarm', alpha=0.3)
plt.legend()
plt.colorbar(label='Output')
plt.show()

# Plotting training loss
plt.figure(figsize=(8, 6))
plt.plot(losses)
plt.title('Training Loss over Epochs')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.show()
Output:
Predictions:
Input: [0 0], Predicted Output: 0.0402, Actual Output: 0
Input: [0 1], Predicted Output: 0.9420, Actual Output: 1
Input: [1 0], Predicted Output: 0.9420, Actual Output: 1
Input: [1 1], Predicted Output: 0.0678, Actual Output: 0
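
The printed values are raw sigmoid activations. Thresholding them at 0.5, a conventional step not shown in the original code, recovers the binary XOR outputs:

# Threshold the sigmoid outputs at 0.5 to obtain binary predictions
binary_predictions = (predicted_output > 0.5).astype(int)
print(binary_predictions.ravel())  # gives [0 1 1 0] for the outputs printed above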
EXPERIMENT-4
Aim: Write an application to implement a Multi-layer neural network.
Dataset Description: The dataset consists of all possible 3-bit binary combinations as
inputs and their corresponding outputs based on the behavior of an AND gate.

• Input Data:
All possible combinations of three binary digits (0 or 1).
• Output Data:
The AND gate outputs 1 if and only if all three input bits are 1. Otherwise, it outputs 0.

Attributes:

• Input 1: The first binary input (0 or 1).
• Input 2: The second binary input (0 or 1).
• Input 3: The third binary input (0 or 1).
• Output: The AND gate output (0 or 1).

Code:

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

# Sigmoid activation function and its derivative
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def sigmoid_derivative(x):
    return x * (1 - x)

# Input data (3-bit combinations)
inputs = np.array([
    [0, 0, 0],
    [0, 0, 1],
    [0, 1, 0],
    [0, 1, 1],
    [1, 0, 0],
    [1, 0, 1],
    [1, 1, 0],
    [1, 1, 1]
])

# Corresponding AND gate outputs
outputs = np.array([[0], [0], [0], [0], [0], [0], [0], [1]])

# Initialize weights randomly (note: this network has no bias terms)
np.random.seed(42)
weights_input_hidden = np.random.rand(3, 4)   # Weights for input to hidden layer
weights_hidden_output = np.random.rand(4, 1)  # Weights for hidden to output layer

# Training the neural network
epochs = 1000
learning_rate = 0.1

for epoch in range(epochs):
    # Forward propagation
    hidden_layer_input = np.dot(inputs, weights_input_hidden)
    hidden_layer_output = sigmoid(hidden_layer_input)
    output_layer_input = np.dot(hidden_layer_output, weights_hidden_output)
    predicted_output = sigmoid(output_layer_input)

    # Backpropagation
    error = outputs - predicted_output
    d_predicted_output = error * sigmoid_derivative(predicted_output)
    error_hidden_layer = d_predicted_output.dot(weights_hidden_output.T)
    d_hidden_layer = error_hidden_layer * sigmoid_derivative(hidden_layer_output)

    # Updating weights using gradients
    weights_hidden_output += hidden_layer_output.T.dot(d_predicted_output) * learning_rate
    weights_input_hidden += inputs.T.dot(d_hidden_layer) * learning_rate

# Testing the trained model
test_data = np.array([
    [0, 0, 0],
    [0, 0, 1],
    [0, 1, 0],
    [0, 1, 1],
    [1, 0, 0],
    [1, 0, 1],
    [1, 1, 0],
    [1, 1, 1]
])

hidden_layer_output = sigmoid(np.dot(test_data, weights_input_hidden))
predicted_output = sigmoid(np.dot(hidden_layer_output, weights_hidden_output))

print("3-Bit AND Gate Representation:")
for i in range(len(test_data)):
    print(f"Input: {test_data[i]} Output: {round(predicted_output[i][0])}")

# Plotting the 3-bit AND gate
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for i in range(len(test_data)):
    x, y, z = test_data[i]
    color = 'b'  # Default color for 0 output
    if (x, y, z) == (1, 1, 1):  # Highlighting the true output
        color = 'r'
    ax.scatter(x, y, z, c=color)

ax.set_xlabel('Input 1')
ax.set_ylabel('Input 2')
ax.set_zlabel('Input 3')
ax.set_title('Output of 3-Bit AND Gate')
plt.show()

Output:
3-Bit AND Gate Representation:
Input: [0 0 0] Output: 0
Input: [0 0 1] Output: 0
Input: [0 1 0] Output: 0
Input: [0 1 1] Output: 0
Input: [1 0 0] Output: 0
Input: [1 0 1] Output: 0
Input: [1 1 0] Output: 0
Input: [1 1 1] Output: 0
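
Every rounded prediction above is 0, including the [1 1 1] case: with no bias terms, only 1000 epochs, and a single positive example among eight, the network under-fits the rare class. A plausible remedy, sketched here as an untested assumption rather than part of the original record, is to add bias terms and continue training with the arrays and functions already defined above:

# Hypothetical fix (untested): add bias terms and train longer
bias_hidden = np.zeros((1, 4))
bias_output = np.zeros((1, 1))
for epoch in range(20000):
    hidden_out = sigmoid(np.dot(inputs, weights_input_hidden) + bias_hidden)
    predicted = sigmoid(np.dot(hidden_out, weights_hidden_output) + bias_output)
    error = outputs - predicted
    d_out = error * sigmoid_derivative(predicted)
    d_hidden = d_out.dot(weights_hidden_output.T) * sigmoid_derivative(hidden_out)
    weights_hidden_output += hidden_out.T.dot(d_out) * learning_rate
    bias_output += d_out.sum(axis=0, keepdims=True) * learning_rate
    weights_input_hidden += inputs.T.dot(d_hidden) * learning_rate
    bias_hidden += d_hidden.sum(axis=0, keepdims=True) * learning_rate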
EXPERIMENT-5
Aim: To write an application to solve a real-world problem.
Dataset Description: The dataset is synthetically generated and consists of 1000 samples
with the following attributes:

1. Temperature: Represents the temperature in degrees Celsius, ranging between 10 and 35.
2. Humidity: Represents the humidity percentage, ranging between 30% and 80%.
3. Wind Speed: Represents the wind speed in m/s, ranging between 0 and 15.
4. Emission Levels: The target variable that represents the emission levels, calculated using a linear equation with some added noise.

Attributes:

• Temperature: Independent variable that affects emission levels.
• Humidity: Independent variable that influences emission levels.
• Wind Speed: Independent variable contributing to emission levels.
• Emission Levels: Dependent variable that is predicted using the regression model.
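
The generating equation used in the code below is Emission = 10 + 2·Temperature + 1.5·Humidity + 0.5·Wind Speed + ε, where ε is Gaussian noise with mean 0 and standard deviation 5.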

Code:
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt

# Setting seed for reproducibility
np.random.seed(42)

# Generating random data
num_samples = 1000
temperature = np.random.uniform(10, 35, num_samples)  # Temperature in degrees Celsius
humidity = np.random.uniform(30, 80, num_samples)     # Humidity in percentage
wind_speed = np.random.uniform(0, 15, num_samples)    # Wind speed in m/s

# Generating target variable (Emission Levels) with added noise
emission_levels = (10 + 2*temperature + 1.5*humidity + 0.5*wind_speed
                   + np.random.normal(0, 5, num_samples))

# Creating a DataFrame
data = pd.DataFrame({
    'Temperature': temperature,
    'Humidity': humidity,
    'Wind_Speed': wind_speed,
    'Emission_Levels': emission_levels
})

# Splitting data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(
    data[['Temperature', 'Humidity', 'Wind_Speed']],
    data['Emission_Levels'],
    test_size=0.2,
    random_state=42
)

# Fitting a Linear Regression model
model = LinearRegression()
model.fit(X_train, y_train)

# Predicting on test data
y_pred = model.predict(X_test)

# Evaluating the model
mse = mean_squared_error(y_test, y_pred)
print(f'Mean Squared Error: {mse}')

# Plotting results
plt.scatter(y_test, y_pred, label='Actual vs. Predicted')
plt.plot(y_test, y_test, color='red', label='Ideal Line')  # Ideal line where Predicted = Actual
plt.xlabel('Actual Emission Levels')
plt.ylabel('Predicted Emission Levels')
plt.title('Actual vs. Predicted Emission Levels')
plt.legend()
plt.show()
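
Because the data comes from a known linear equation, a quick sanity check, not part of the original record, is to compare the fitted parameters against the true values:

# Sanity check (not in the original record): recovered vs. true parameters
print('Coefficients:', model.coef_)    # should be close to [2, 1.5, 0.5]
print('Intercept:', model.intercept_)  # should be close to 10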

Output:
