ANN Programs

The document outlines various practical exercises involving Python programming for neural networks, including plotting activation functions, implementing McCulloch-Pitts neurons, recognizing even and odd numbers with a perceptron, and creating a neural network for multi-class classification. Each practical includes code snippets demonstrating the concepts and algorithms used in neural network training and evaluation. The exercises cover topics such as forward propagation, back propagation, and decision regions visualization.

Practical No. 1

Write a Python program to plot a few of the activation functions used in neural networks.
import numpy as np
import matplotlib.pyplot as plt

# Sigmoid squashes any real input into the range (0, 1)
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

# Generate 100 evenly spaced x values from -10 to 10
x = np.linspace(-10, 10, 100)

# Calculate sigmoid function values for x
y = sigmoid(x)

# Plot the sigmoid function
plt.plot(x, y)
plt.title('Sigmoid Activation Function')
plt.xlabel('x')
plt.ylabel('sigmoid(x)')
plt.show()

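The program above plots only the sigmoid, while the exercise asks for a few activation functions. A minimal sketch extending it to also show ReLU and tanh on one figure (the same x range is assumed; the set of functions chosen here is illustrative):

import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(-10, 10, 100)

# sigmoid -> (0, 1); ReLU = max(0, x); tanh -> (-1, 1)
plt.plot(x, 1 / (1 + np.exp(-x)), label='sigmoid')
plt.plot(x, np.maximum(0, x), label='ReLU')
plt.plot(x, np.tanh(x), label='tanh')
plt.title('Common Activation Functions')
plt.xlabel('x')
plt.legend()
plt.show()
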
Practical No. 2

Generate the ANDNOT function using a McCulloch-Pitts neural net in a Python program.


Ref: https://www.youtube.com/watch?v=N5vNT4J6GDg

----------------------------------------------------------------------------------------------------------------------
import numpy as np

# A McCulloch-Pitts neuron fires (outputs 1) when the weighted
# sum of its inputs reaches the threshold
def mp_neuron(inputs, weights, threshold):
    weighted_sum = np.dot(inputs, weights)
    output = 1 if weighted_sum >= threshold else 0
    return output

# ANDNOT(x1, x2) = x1 AND (NOT x2): weight +1 excites on x1,
# weight -1 inhibits on x2, and the threshold is 1
def and_not(x1, x2):
    weights = [1, -1]
    threshold = 1
    inputs = np.array([x1, x2])
    output = mp_neuron(inputs, weights, threshold)
    return output

print(and_not(0, 0))  # 0
print(and_not(1, 0))  # 1
print(and_not(0, 1))  # 0
print(and_not(1, 1))  # 0

----------------------------------------------------------------------------------------------------------------------

import numpy as np

# Define the input and output vectors for the ANDNOT function
# (the full truth table of x1 AND NOT x2)
X = np.array([[0, 0],
              [1, 0],
              [0, 1],
              [1, 1]])
Y = np.array([0, 1, 0, 0])

# Initialize the weights and bias
w = np.array([0.0, 0.0], dtype=np.float64)
b = 0.0

# Set the learning rate and number of iterations
lr = 0.1
n_iter = 10

# Define the step function for the output
def step(x):
    return 1 if x > 0 else 0

# Train the network using the perceptron learning algorithm
for i in range(n_iter):
    for j in range(len(X)):
        x = X[j]
        y = Y[j]
        z = np.dot(x, w) + b
        o = step(z)
        # Perceptron learning rule: shift weights toward the target
        dw = lr * (y - o) * x
        db = lr * (y - o)
        w += dw
        b += db

# Test the network using the trained weights and bias
test_x = np.array([1, 1])
test_z = np.dot(test_x, w) + b
test_o = step(test_z)

# Print the results
print("Input: ", test_x)
print("Output: ", test_o)


Practical No. 3

Write a Python program using a perceptron neural network to recognize even and odd numbers. The given numbers are in ASCII, from 0 to 9.

import numpy as np

j = int(input("Enter a Number (0-9): "))

step_function = lambda x: 1 if x >= 0 else 0

# Each digit 0-9 is encoded as a 6-bit binary pattern;
# label 1 = even, label 0 = odd
training_data = [
    {'input': [0, 0, 0, 0, 0, 0], 'label': 1},
    {'input': [0, 0, 0, 0, 0, 1], 'label': 0},
    {'input': [0, 0, 0, 0, 1, 0], 'label': 1},
    {'input': [0, 0, 0, 0, 1, 1], 'label': 0},
    {'input': [0, 0, 0, 1, 0, 0], 'label': 1},
    {'input': [0, 0, 0, 1, 0, 1], 'label': 0},
    {'input': [0, 0, 0, 1, 1, 0], 'label': 1},
    {'input': [0, 0, 0, 1, 1, 1], 'label': 0},
    {'input': [0, 0, 1, 0, 0, 0], 'label': 1},
    {'input': [0, 0, 1, 0, 0, 1], 'label': 0},
]

weights = np.array([0, 0, 0, 0, 0, 1])

# Train for several epochs; a single pass over the data
# does not converge to a correct parity classifier
for epoch in range(10):
    for data in training_data:
        input_vec = np.array(data['input'])
        label = data['label']
        output = step_function(np.dot(input_vec, weights))
        error = label - output
        weights += input_vec * error

# Encode the user's number as a 6-bit binary pattern and classify it
input_vec = np.array([int(x) for x in list('{0:06b}'.format(j))])
output = "odd" if step_function(np.dot(input_vec, weights)) == 0 else "even"
print(j, " is ", output)
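The task statement mentions ASCII input, while the program above encodes the digit's numeric value in 6-bit binary. If one wanted to train on the ASCII codes instead, the encoding step could look like the following sketch (names are illustrative; the codes for '0'..'9' are 48..57, and since 48 is even, the least significant bit of the ASCII code matches the digit's parity, so a perceptron would learn the same even/odd rule):

# Hypothetical ASCII-based encoding for the same parity task
ch = str(j)                                        # the digit as a character
ascii_bits = [int(b) for b in format(ord(ch), '08b')]
print(ch, "has ASCII code", ord(ch), "->", ascii_bits)
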


Practical No. 4

With a suitable example, demonstrate the perceptron learning law and its decision regions using Python. Give the output in graphical form.

Ref: https://pyimagesearch.com/2021/05/06/implementing-the-perceptron-neural-network-with-python/

import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Perceptron

# Generate random data, linearly separable by the line x1 + x2 = 0
np.random.seed(0)
X = np.random.randn(100, 2)
y = np.where(X[:, 0] + X[:, 1] > 0, 1, -1)

# Create perceptron object
clf = Perceptron(random_state=0)

# Fit perceptron to the data
clf.fit(X, y)

# Plot decision regions over a grid covering the data
xmin, xmax = X[:, 0].min() - 1, X[:, 0].max() + 1
ymin, ymax = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(xmin, xmax, 0.1),
                     np.arange(ymin, ymax, 0.1))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, alpha=0.4)
plt.scatter(X[:, 0], X[:, 1], c=y, alpha=0.8)
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
plt.xlabel('Feature 1')
plt.ylabel('Feature 2')
plt.show()

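To see the separating line the perceptron actually learned, one can inspect the fitted model with standard scikit-learn attributes (assuming clf, X, and y from above):

print("weights:", clf.coef_)        # learned weight vector
print("bias:", clf.intercept_)      # learned bias term
print("train accuracy:", clf.score(X, y))
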

----------------------------------------------------------------------------------------------------------------------

import numpy as np
import matplotlib.pyplot as plt

# AND gate with bipolar (-1/1) targets
X = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])
Y = np.array([-1, -1, -1, 1])

w = np.zeros(X.shape[1])
b = 0

# Perceptron learning law: update w and b only on misclassified points
# (6 epochs leave one point on the decision boundary; 10 converge)
for _ in range(10):
    for i in range(X.shape[0]):
        y_pred = np.sign(np.dot(X[i], w) + b)
        if y_pred != Y[i]:
            w += 0.3 * Y[i] * X[i]
            b += 0.3 * Y[i]

# Plot decision regions over a fine grid
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.01),
                     np.arange(y_min, y_max, 0.01))
Z = np.sign(np.dot(np.c_[xx.ravel(), yy.ravel()], w) + b)
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, alpha=0.8)
plt.scatter(X[:, 0], X[:, 1], c=Y)
plt.xlabel('X1')
plt.ylabel('X2')
plt.title('Perceptron Decision Regions')
plt.show()

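A quick check that the learned parameters separate all four AND points (assuming X, Y, w, and b from the loop above):

for i in range(X.shape[0]):
    print(X[i], "target", Y[i], "predicted", int(np.sign(np.dot(X[i], w) + b)))
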
Practical No. 5

Implement the artificial neural network training process in Python using forward propagation and back propagation.

Program 1:

import numpy as np

class NeuralNet(object):
    def __init__(self):
        # Seed the generator so results are reproducible
        np.random.seed(1)
        # Assign random weights in [-1, 1) to a 3 x 1 matrix
        self.synaptic_weights = 2 * np.random.random((3, 1)) - 1

    # The sigmoid function
    def __sigmoid(self, x):
        return 1 / (1 + np.exp(-x))

    # The derivative of the sigmoid function, written in terms of
    # the sigmoid output (the gradient of the sigmoid curve)
    def __sigmoid_derivative(self, x):
        return x * (1 - x)

    # Train the neural network and adjust the weights each time
    def train(self, inputs, outputs, training_iterations):
        for iteration in range(training_iterations):
            # Pass the training set through the network
            output = self.learn(inputs)
            # Calculate the error
            error = outputs - output
            # Adjust the weights by a factor proportional to the error
            factor = np.dot(inputs.T, error * self.__sigmoid_derivative(output))
            self.synaptic_weights += factor

    # The neural network "thinks": a single forward pass
    def learn(self, inputs):
        return self.__sigmoid(np.dot(inputs, self.synaptic_weights))

if __name__ == "__main__":
    # Initialize
    neural_network = NeuralNet()
    # The training set: the target equals the third input element
    inputs = np.array([[0, 1, 1], [1, 0, 0], [1, 0, 1]])
    outputs = np.array([[1, 0, 1]]).T
    # Train the neural network
    neural_network.train(inputs, outputs, 10000)
    # Test the neural network with a test example
    print(neural_network.learn(np.array([1, 0, 1])))
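Since the target in this toy set equals the third input element, the trained neuron should also handle a pattern it never saw during training. A quick check (assuming the script above has just run):

# Unseen input whose third element is 1, so expect a value near 1
print(neural_network.learn(np.array([0, 0, 1])))
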

********************************************************************************************

Program 2:

import numpy as np

# Define the sigmoid activation function and its derivative
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

# Derivative expressed in terms of the sigmoid output
def sigmoid_derivative(x):
    return x * (1 - x)

# Define the input and target output data (the XOR truth table)
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])

# Define the neural network structure and initialize weights
# (bias terms are omitted here for simplicity, which can limit
# how closely the network fits XOR)
input_size = 2
hidden_size = 2
output_size = 1
weights_input_hidden = np.random.uniform(size=(input_size, hidden_size))
weights_hidden_output = np.random.uniform(size=(hidden_size, output_size))

# Set the learning rate and number of epochs
learning_rate = 0.1
num_epochs = 10000

# Training loop
for i in range(num_epochs):
    # Forward propagation
    hidden_layer_input = np.dot(X, weights_input_hidden)
    hidden_layer_activation = sigmoid(hidden_layer_input)
    output_layer_input = np.dot(hidden_layer_activation, weights_hidden_output)
    output_layer_activation = sigmoid(output_layer_input)

    # Back propagation: calculate the error and the deltas
    error = y - output_layer_activation
    output_delta = error * sigmoid_derivative(output_layer_activation)
    hidden_delta = output_delta.dot(weights_hidden_output.T) * sigmoid_derivative(hidden_layer_activation)

    # Update the weights
    weights_hidden_output += hidden_layer_activation.T.dot(output_delta) * learning_rate
    weights_input_hidden += X.T.dot(hidden_delta) * learning_rate

# Make predictions on the input data
hidden_layer_input = np.dot(X, weights_input_hidden)
hidden_layer_activation = sigmoid(hidden_layer_input)
output_layer_input = np.dot(hidden_layer_activation, weights_hidden_output)
output_layer_activation = sigmoid(output_layer_input)
print(output_layer_activation)
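The raw sigmoid outputs are continuous values between 0 and 1; to compare them against the XOR targets directly, one can threshold at 0.5 (assuming the arrays from the program above):

# Convert continuous outputs to hard 0/1 predictions
predictions = (output_layer_activation > 0.5).astype(int)
print(np.hstack([X, y, predictions]))  # columns: inputs, target, prediction
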
Practical No. 6

Create a neural network architecture from scratch in Python and use it to do multi-class classification on any data. (The program below handles the two-class heart-disease dataset; a sketch with a softmax output layer for more than two classes follows after it.)

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

# Add header names
headers = ['age', 'sex', 'chest_pain', 'resting_blood_pressure', 'serum_cholestoral',
           'fasting_blood_sugar', 'resting_ecg_results', 'max_heart_rate_achieved',
           'exercise_induced_angina', 'oldpeak', 'slope_of_the_peak', 'num_of_major_vessels',
           'thal', 'heart_disease']

# Load the dataset
heart_df = pd.read_csv('/home/student/Downloads/heart.csv', sep=',', names=headers)

# Convert input to numeric values (if any non-numeric values exist)
heart_df = heart_df.apply(pd.to_numeric, errors='coerce')

# Handle missing values (if any) by imputing the column mean
heart_df.fillna(heart_df.mean(), inplace=True)

# Features: every column except the target
X = heart_df.drop(columns=['heart_disease'])

# Replace the target classes 1/2 with 0/1
heart_df['heart_disease'] = heart_df['heart_disease'].replace({1: 0, 2: 1})
y_label = heart_df['heart_disease'].values.reshape(-1, 1)

# Split data into train and test sets
Xtrain, Xtest, ytrain, ytest = train_test_split(X, y_label, test_size=0.2, random_state=2)

# Standardize the features
sc = StandardScaler()
Xtrain = sc.fit_transform(Xtrain)
Xtest = sc.transform(Xtest)

print(heart_df.dtypes)
print(f"\nShape of train set is {Xtrain.shape}")
print(f"\nShape of test set is {Xtest.shape}")
print(f"\nShape of train label is {ytrain.shape}")
print(f"\nShape of test labels is {ytest.shape}")

# Neural Network Class: one ReLU hidden layer and a sigmoid output,
# trained with gradient descent on binary cross-entropy loss
class NeuralNet:
    def __init__(self, layers=[13, 8, 1], learning_rate=0.001, iterations=1000):
        self.params = {}
        self.learning_rate = learning_rate
        self.iterations = iterations
        self.loss = []
        self.layers = layers
        self.X = None
        self.y = None

    def init_weights(self):
        np.random.seed(1)
        self.params['W1'] = np.random.randn(self.layers[0], self.layers[1]) * 0.01
        self.params['b1'] = np.zeros((1, self.layers[1]))
        self.params['W2'] = np.random.randn(self.layers[1], self.layers[2]) * 0.01
        self.params['b2'] = np.zeros((1, self.layers[2]))

    def sigmoid(self, Z):
        return 1 / (1 + np.exp(-Z))

    def dSigmoid(self, Z):
        sig = self.sigmoid(Z)
        return sig * (1 - sig)

    def relu(self, Z):
        return np.maximum(0, Z)

    def dRelu(self, Z):
        return (Z > 0).astype(float)

    def entropy_loss(self, y, yhat):
        # Clip predictions away from 0 and 1 to avoid log(0)
        epsilon = 1e-10
        yhat = np.clip(yhat, epsilon, 1 - epsilon)
        loss = -np.mean(y * np.log(yhat) + (1 - y) * np.log(1 - yhat))
        return loss

    def forward_propagation(self):
        Z1 = self.X.dot(self.params['W1']) + self.params['b1']
        A1 = self.relu(Z1)
        Z2 = A1.dot(self.params['W2']) + self.params['b2']
        yhat = self.sigmoid(Z2)
        loss = self.entropy_loss(self.y, yhat)
        # Cache intermediate values for back propagation
        self.params['Z1'] = Z1
        self.params['Z2'] = Z2
        self.params['A1'] = A1
        return yhat, loss

    def back_propagation(self, yhat):
        m = self.X.shape[0]
        # Error signal at the output, chained backwards through the layers
        dl_wrt_yhat = (yhat - self.y) / m
        dl_wrt_sig = self.dSigmoid(self.params['Z2'])
        dl_wrt_z2 = dl_wrt_yhat * dl_wrt_sig
        dl_wrt_A1 = dl_wrt_z2.dot(self.params['W2'].T)
        dl_wrt_w2 = self.params['A1'].T.dot(dl_wrt_z2)
        dl_wrt_b2 = np.sum(dl_wrt_z2, axis=0, keepdims=True)
        dl_wrt_z1 = dl_wrt_A1 * self.dRelu(self.params['Z1'])
        dl_wrt_w1 = self.X.T.dot(dl_wrt_z1)
        dl_wrt_b1 = np.sum(dl_wrt_z1, axis=0, keepdims=True)
        # Gradient descent step
        self.params['W1'] -= self.learning_rate * dl_wrt_w1
        self.params['W2'] -= self.learning_rate * dl_wrt_w2
        self.params['b1'] -= self.learning_rate * dl_wrt_b1
        self.params['b2'] -= self.learning_rate * dl_wrt_b2

    def fit(self, X, y):
        self.X = X
        self.y = y
        self.init_weights()
        for i in range(self.iterations):
            yhat, loss = self.forward_propagation()
            self.back_propagation(yhat)
            self.loss.append(loss)
            # Learning rate decay (optional)
            if i % 100 == 0 and i != 0:
                self.learning_rate *= 0.9
            if i % 100 == 0:
                print(f"Iteration {i}, Loss: {loss}")

    def predict(self, X):
        Z1 = X.dot(self.params['W1']) + self.params['b1']
        A1 = self.relu(Z1)
        Z2 = A1.dot(self.params['W2']) + self.params['b2']
        yhat = self.sigmoid(Z2)
        return np.round(yhat)

    def acc(self, y, yhat):
        return np.mean(y == yhat) * 100

    def plot_loss(self):
        plt.plot(self.loss)
        plt.xlabel("Iterations")
        plt.ylabel("Loss")
        plt.title("Loss curve during training")
        plt.show()

# Train the model
nn = NeuralNet(layers=[13, 8, 1], learning_rate=0.001, iterations=1000)
nn.fit(Xtrain, ytrain)

# Evaluate the model
y_pred = nn.predict(Xtest)
accuracy = nn.acc(ytest, y_pred)
print(f"Test Accuracy: {accuracy}%")

# Plot the loss curve
nn.plot_loss()

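The network above has a single sigmoid output, so it handles two classes. A minimal sketch of how the same scratch approach extends to true multi-class classification, using a softmax output layer; the Iris dataset, layer sizes, learning rate, and iteration count here are illustrative assumptions, not part of the original program:

import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

# Load a small three-class dataset and standardize it
X, y = load_iris(return_X_y=True)
Xtr, Xte, ytr, yte = train_test_split(X, y, test_size=0.2, random_state=2)
sc = StandardScaler()
Xtr, Xte = sc.fit_transform(Xtr), sc.transform(Xte)

n_in, n_hid, n_out = 4, 8, 3
rng = np.random.default_rng(1)
W1 = rng.standard_normal((n_in, n_hid)) * 0.1
b1 = np.zeros((1, n_hid))
W2 = rng.standard_normal((n_hid, n_out)) * 0.1
b2 = np.zeros((1, n_out))

Y = np.eye(n_out)[ytr]  # one-hot targets, one column per class

def softmax(Z):
    # Subtract the row max for numerical stability
    e = np.exp(Z - Z.max(axis=1, keepdims=True))
    return e / e.sum(axis=1, keepdims=True)

lr = 0.1
for i in range(2000):
    A1 = np.maximum(0, Xtr @ W1 + b1)   # ReLU hidden layer
    P = softmax(A1 @ W2 + b2)           # class probabilities
    dZ2 = (P - Y) / len(Xtr)            # softmax + cross-entropy gradient
    dZ1 = (dZ2 @ W2.T) * (A1 > 0)       # back-propagate through ReLU
    W2 -= lr * A1.T @ dZ2
    b2 -= lr * dZ2.sum(axis=0, keepdims=True)
    W1 -= lr * Xtr.T @ dZ1
    b1 -= lr * dZ1.sum(axis=0, keepdims=True)

# Predicted class = argmax of the softmax probabilities
A1 = np.maximum(0, Xte @ W1 + b1)
pred = softmax(A1 @ W2 + b2).argmax(axis=1)
print("Test accuracy:", np.mean(pred == yte) * 100, "%")
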

Practical No. 7

Write a Python program to illustrate an ART (Adaptive Resonance Theory) neural network.

import numpy as np

# Start a new category with random weights normalized to sum to 1
def initialize_weights(input_dim):
    weights = np.random.uniform(size=(input_dim,))
    weights /= np.sum(weights)
    return weights

# Similarity: the overlap between an input and a category's weights
def calculate_similarity(input_pattern, weights):
    return np.minimum(input_pattern, weights).sum()

# Reinforce the weight at the strongest input position until the
# category matches the pattern at the required vigilance level
def update_weights(input_pattern, weights, vigilance):
    while True:
        activation = calculate_similarity(input_pattern, weights)
        if activation >= vigilance:
            return weights
        else:
            weights[np.argmax(input_pattern)] += 1
            weights /= np.sum(weights)

def ART_neural_network(input_patterns, vigilance):
    num_patterns, input_dim = input_patterns.shape
    categories = []
    for pattern in input_patterns:
        # Search for an existing category that passes the vigilance test
        matched_category = None
        for category in categories:
            if calculate_similarity(pattern, category["weights"]) >= vigilance:
                matched_category = category
                break
        # No match: create a new category for this pattern
        if matched_category is None:
            weights = initialize_weights(input_dim)
            matched_category = {"weights": weights, "patterns": []}
            categories.append(matched_category)
        matched_category["patterns"].append(pattern)
        matched_category["weights"] = update_weights(pattern, matched_category["weights"], vigilance)
    return categories

# Example usage
input_patterns = np.array([[1, 0, 1, 0], [0, 1, 0, 1], [1, 1, 1, 0]])
vigilance = 0.5
categories = ART_neural_network(input_patterns, vigilance)

# Print the learned categories
for i, category in enumerate(categories):
    print(f"Category {i+1}:")
    print("Patterns:")
    for pattern in category["patterns"]:
        print(pattern)
    print("Weights:")
    print(category["weights"])
    print()
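In this simplified ART sketch, the vigilance parameter controls how granular the learned categories are: a higher value demands a closer match before a pattern joins an existing category, so more categories tend to form. A quick sweep over the same patterns illustrates this (assuming input_patterns and ART_neural_network from above; exact counts vary with the random initial weights):

for v in (0.3, 0.5, 0.9):
    cats = ART_neural_network(input_patterns, v)
    print(f"vigilance={v}: {len(cats)} categories learned")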
