Math Lab 1

The document describes the implementation of a basic multilayer neural network for classification. It includes functions for initializing the network, forward propagating inputs to calculate outputs, backpropagating the error to update the weights, and training the network over multiple epochs on a small sample dataset with two classes. The network is initialized with random weights and trained for 60 epochs with a learning rate of 1 to minimize the sum-squared error on the training data.
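Concretely, the quantity reported each epoch, and driven down by the weight updates, is the sum-squared error over the training rows, in the names used by the code below:

E = \sum_{\text{rows}} \sum_{i} (\text{expected}_i - \text{output}_i)^2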


math-lab-1

December 18, 2023

[16]: from math import exp

# Calculate neuron activation for an input
def activate(weights, inputs):
    activation = weights[-1]
    for i in range(len(weights)-1):
        activation += weights[i] * inputs[i]
    return activation

# Transfer neuron activation
def transfer(activation):
    return 1.0 / (1.0 + exp(-activation))

# Forward propagate input to a network output
def forward_propagate(network, row):
    inputs = row
    for layer in network:
        new_inputs = []
        for neuron in layer:
            activation = activate(neuron['weights'], inputs)
            neuron['output'] = transfer(activation)
            new_inputs.append(neuron['output'])
        inputs = new_inputs
    return inputs

# Make a prediction with a network
def predict(network, row):
    outputs = forward_propagate(network, row)
    return outputs.index(max(outputs))

# Test making predictions with the network
dataset = [[2.7810836, 2.550537003, 0],
           [1.465489372, 2.362125076, 0],
           [3.396561688, 4.400293529, 0],
           [1.38807019, 1.850220317, 0],
           [3.06407232, 3.005305973, 0],
           [7.627531214, 2.759262235, 1],
           [5.332441248, 2.088626775, 1],
           [6.922596716, 1.77106367, 1],
           [8.675418651, -0.242068655, 1],
           [7.673756466, 3.508563011, 1]]
network = [[{'weights': [-1.482313569067226, 1.8308790073202204, 1.078381922048799]},
            {'weights': [0.23244990332399884, 0.3621998343835864, 0.40289821191094327]}],
           [{'weights': [2.5001872433501404, 0.7887233511355132, -1.1026649757805829]},
            {'weights': [-2.429350576245497, 0.8357651039198697, 1.0699217181280656]}]]
for row in dataset:
    prediction = predict(network, row)
    print('Expected=%d, Got=%d' % (row[-1], prediction))

Expected=0, Got=0
Expected=0, Got=0
Expected=0, Got=0
Expected=0, Got=0
Expected=0, Got=0
Expected=1, Got=1
Expected=1, Got=1
Expected=1, Got=1
Expected=1, Got=1
Expected=1, Got=1
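As a quick illustration (not part of the original lab), the fixed network above can also be asked to label a point that is not in the training data; the coordinates here are made up purely for demonstration:

# Classify a hypothetical unseen point (made-up coordinates)
new_row = [4.5, 2.0]
print('Predicted class:', predict(network, new_row))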

[22]: from math import exp
from random import seed
from random import random

# Initialize a network
def initialize_network(n_inputs, n_hidden, n_outputs):
    network = list()
    hidden_layer = [{'weights': [random() for i in range(n_inputs + 1)]}
                    for i in range(n_hidden)]
    network.append(hidden_layer)
    output_layer = [{'weights': [random() for i in range(n_hidden + 1)]}
                    for i in range(n_outputs)]
    network.append(output_layer)
    return network

# Calculate neuron activation for an input
def activate(weights, inputs):
    activation = weights[-1]
    for i in range(len(weights)-1):
        activation += weights[i] * inputs[i]
    return activation

# Transfer neuron activation
def transfer(activation):
    return 1.0 / (1.0 + exp(-activation))

# Forward propagate input to a network output
def forward_propagate(network, row):
    inputs = row
    for layer in network:
        new_inputs = []
        for neuron in layer:
            activation = activate(neuron['weights'], inputs)
            neuron['output'] = transfer(activation)
            new_inputs.append(neuron['output'])
        inputs = new_inputs
    return inputs

# Calculate the derivative of a neuron output
def transfer_derivative(output):
    return output * (1.0 - output)

# Backpropagate error and store in neurons
def backward_propagate_error(network, expected):
    for i in reversed(range(len(network))):
        layer = network[i]
        errors = list()
        if i != len(network)-1:
            # hidden layer: error is the delta-weighted sum from the next layer
            for j in range(len(layer)):
                error = 0.0
                for neuron in network[i + 1]:
                    error += (neuron['weights'][j] * neuron['delta'])
                errors.append(error)
        else:
            # output layer: error is the difference from the expected output
            for j in range(len(layer)):
                neuron = layer[j]
                errors.append(neuron['output'] - expected[j])
        for j in range(len(layer)):
            neuron = layer[j]
            neuron['delta'] = errors[j] * transfer_derivative(neuron['output'])

# Update network weights with error
def update_weights(network, row, l_rate):
    for i in range(len(network)):
        inputs = row[:-1]
        if i != 0:
            inputs = [neuron['output'] for neuron in network[i - 1]]
        for neuron in network[i]:
            for j in range(len(inputs)):
                neuron['weights'][j] -= l_rate * neuron['delta'] * inputs[j]
            neuron['weights'][-1] -= l_rate * neuron['delta']  # bias term

# Train a network for a fixed number of epochs
def train_network(network, train, l_rate, n_epoch, n_outputs):
    for epoch in range(n_epoch):
        sum_error = 0
        for row in train:
            outputs = forward_propagate(network, row)
            expected = [0 for i in range(n_outputs)]
            expected[row[-1]] = 1  # one-hot encode the class label
            sum_error += sum([(expected[i]-outputs[i])**2
                              for i in range(len(expected))])
            backward_propagate_error(network, expected)
            update_weights(network, row, l_rate)
        print('>epoch=%d, lrate=%.3f, error=%.3f' % (epoch, l_rate, sum_error))

# Test training backprop algorithm
seed(1)
dataset = [[2.7810836, 2.550537003, 0],
           [1.465489372, 2.362125076, 0],
           [3.396561688, 4.400293529, 0],
           [1.38807019, 1.850220317, 0],
           [3.06407232, 3.005305973, 0],
           [7.627531214, 2.759262235, 1],
           [5.332441248, 2.088626775, 1],
           [6.922596716, 1.77106367, 1],
           [8.675418651, -0.242068655, 1],
           [7.673756466, 3.508563011, 1]]
n_inputs = len(dataset[0]) - 1
n_outputs = len(set([row[-1] for row in dataset]))
network = initialize_network(n_inputs, 2, n_outputs)
train_network(network, dataset, 1, 60, n_outputs)
for layer in network:
    print(layer)

>epoch=0, lrate=1.000, error=5.969
>epoch=1, lrate=1.000, error=5.314
>epoch=2, lrate=1.000, error=4.825
>epoch=3, lrate=1.000, error=4.109
>epoch=4, lrate=1.000, error=3.437
>epoch=5, lrate=1.000, error=2.821
>epoch=6, lrate=1.000, error=2.331
>epoch=7, lrate=1.000, error=1.937
>epoch=8, lrate=1.000, error=1.621
>epoch=9, lrate=1.000, error=1.365
>epoch=10, lrate=1.000, error=1.157
>epoch=11, lrate=1.000, error=0.989
>epoch=12, lrate=1.000, error=0.855
>epoch=13, lrate=1.000, error=0.747
>epoch=14, lrate=1.000, error=0.659
>epoch=15, lrate=1.000, error=0.586
>epoch=16, lrate=1.000, error=0.527
>epoch=17, lrate=1.000, error=0.476
>epoch=18, lrate=1.000, error=0.434
>epoch=19, lrate=1.000, error=0.397
>epoch=20, lrate=1.000, error=0.366
>epoch=21, lrate=1.000, error=0.339
>epoch=22, lrate=1.000, error=0.315
>epoch=23, lrate=1.000, error=0.294
>epoch=24, lrate=1.000, error=0.275
>epoch=25, lrate=1.000, error=0.258
>epoch=26, lrate=1.000, error=0.244
>epoch=27, lrate=1.000, error=0.230
>epoch=28, lrate=1.000, error=0.218
>epoch=29, lrate=1.000, error=0.207
>epoch=30, lrate=1.000, error=0.197
>epoch=31, lrate=1.000, error=0.188
>epoch=32, lrate=1.000, error=0.179
>epoch=33, lrate=1.000, error=0.172
>epoch=34, lrate=1.000, error=0.165
>epoch=35, lrate=1.000, error=0.158
>epoch=36, lrate=1.000, error=0.152
>epoch=37, lrate=1.000, error=0.146
>epoch=38, lrate=1.000, error=0.141
>epoch=39, lrate=1.000, error=0.136
>epoch=40, lrate=1.000, error=0.131
>epoch=41, lrate=1.000, error=0.127
>epoch=42, lrate=1.000, error=0.123
>epoch=43, lrate=1.000, error=0.119
>epoch=44, lrate=1.000, error=0.115
>epoch=45, lrate=1.000, error=0.112
>epoch=46, lrate=1.000, error=0.109
>epoch=47, lrate=1.000, error=0.105
>epoch=48, lrate=1.000, error=0.103
>epoch=49, lrate=1.000, error=0.100
>epoch=50, lrate=1.000, error=0.097
>epoch=51, lrate=1.000, error=0.095
>epoch=52, lrate=1.000, error=0.092
>epoch=53, lrate=1.000, error=0.090
>epoch=54, lrate=1.000, error=0.088
>epoch=55, lrate=1.000, error=0.086
>epoch=56, lrate=1.000, error=0.084
>epoch=57, lrate=1.000, error=0.082
>epoch=58, lrate=1.000, error=0.080
>epoch=59, lrate=1.000, error=0.078
[{'weights': [-1.9598419667150966, 2.6239832962806067, 1.3882488579080972],
'output': 0.012004364044413891, 'delta': 0.00047484739603003887}, {'weights':
[-1.2471410186511618, 1.581638968485985, 0.7999207962427648], 'output':
0.04034348306913755, 'delta': 0.0007190111183387699}]
[{'weights': [3.8149956061436465, 2.203918566765813, -2.696403338915056],
'output': 0.07196576384975523, 'delta': 0.00480635535395085}, {'weights':
[-4.288688346420154, -1.5764744416485663, 2.6481765752481286], 'output':
0.9260832267280501, 'delta': -0.005059831082476945}]
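Since the cells share one notebook session, a natural sanity check (not in the original notebook) is to score the trained network with the predict helper defined in the first cell; this sketch assumes that cell has already been run:

# Count how many training rows the trained network labels correctly
# (assumes predict() from the first cell is still defined in the session)
correct = sum(1 for row in dataset if predict(network, row) == row[-1])
print('Training accuracy: %d/%d' % (correct, len(dataset)))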

[21]: import math

# Initial weights and biases
W13 = 0.1
W14 = 0.4
W23 = 0.8
W24 = 0.6
W35 = 0.3
W45 = 0.9

# Inputs
X1 = 0.5
X2 = 0.6

# Target output
target_output = 0.5

# Learning rate
alpha = 1.0

# Forward Pass
Y3 = 1 / (1 + math.exp(-(W13 * X1 + W23 * X2)))
Y4 = 1 / (1 + math.exp(-(W14 * X1 + W24 * X2)))
Y5 = 1 / (1 + math.exp(-(W35 * Y3 + W45 * Y4)))

# Error calculation
error = 0.5 * (target_output - Y5)**2

# Backward Pass (Gradient Descent)
dE_dY5 = Y5 - target_output
dY5_dz5 = Y5 * (1 - Y5)
delta5 = dE_dY5 * dY5_dz5

dE_dW35 = delta5 * Y3
dE_dW45 = delta5 * Y4

# Update weights
W35 = W35 - alpha * dE_dW35
W45 = W45 - alpha * dE_dW45

# Another Forward Pass with Updated Weights
new_Y5 = 1 / (1 + math.exp(-(W35 * Y3 + W45 * Y4)))

# Print results
print("Value of Y3:", Y3)
print("Value of Y4:", Y4)
print("Error:", error)
print("Final Output Y5:", new_Y5)
print("Updated Weights:")
print("W13:", W13)
print("W14:", W14)
print("W23:", W23)
print("W24:", W24)
print("W35:", W35)
print("W45:", W45)

Value of Y3: 0.6294831119673949
Value of Y4: 0.6364525402815664
Error: 0.016509687447868068
Final Output Y5: 0.6748179335379595
Updated Weights:
W13: 0.1
W14: 0.4
W23: 0.8
W24: 0.6
W35: 0.2751807037581705
W45: 0.874905912738877
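The worked example only updates the output-layer weights, which is why W13, W14, W23, and W24 print unchanged above. As a hypothetical continuation (not in the original cell), the same chain rule extends one layer back; the sketch below reuses the cell's variable names and the pre-update value W35 = 0.3, since hidden deltas are computed from the output weights before they are changed:

# Hypothetical continuation: backpropagate delta5 to hidden neuron 3
W35_old = 0.3                               # output weight before its update
delta3 = delta5 * W35_old * Y3 * (1 - Y3)   # hidden delta via the chain rule
dE_dW13 = delta3 * X1                       # gradient of the error w.r.t. W13
W13 = W13 - alpha * dE_dW13
print("Updated W13:", W13)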
