Artificial Neural Networks
Practical File
Name/Sec: Tanuj Vats / IT-1
Roll No.: 2020UIT3017
Subject: Artificial Neural Networks
INDEX
S.No.  Experiment                                                      Teacher's Remarks
1.     Introduction to programming languages
2.     Implement OR using MP neuron
3.     Implement NOR using MP neuron
4.     Implement AND using Hebb
5.     Implement ANDNOT using Hebb
6.     Implement perceptron learning rule for OR function
7.     Implement perceptron learning rule for AND function
8.     Implement OR using Adaline
9.     Implement backpropagation network for any given dataset
10.    Implement heteroassociative memory for pattern recognition
11.    Implement bidirectional associative memory for pattern recognition
12.    Implement Hopfield network for any given pattern
13.    Project
Experiment- 1
Programming languages are tools that allow humans to communicate with computers and build software applications. Each language defines a set of rules and a syntax that determine how instructions are written to perform specific tasks or solve particular problems. The experiments in this file are written in Python, using NumPy (and, for Experiment 9, Keras).
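As a small, self-contained illustration (a sketch added for this introduction, not drawn from a later experiment), the snippet below shows the basic Python ingredients used throughout the file: variables, a loop and formatted output.

# A minimal Python example: a list, a loop, and an f-string
numbers = [1, 2, 3, 4]
total = 0
for n in numbers:
    total += n
print(f"The sum of {numbers} is {total}")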
Experiment- 2
The McCulloch-Pitts (MP) neuron sums its binary inputs through fixed weights and fires when the net input reaches a threshold T; with unit weights on both inputs, T = 1 computes OR.
import numpy as np
input_table = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])  # OR truth-table inputs
dot = input_table @ np.array([1, 1])  # net input with unit excitatory weights
def AF(x, T):
    # MP step activation: fire once the net input reaches the threshold
    return 1 if x >= T else 0
T = 1
for i in range(0, 4):
    output = AF(dot[i], T)
    print(f"Output: {output} for input: {input_table[i]}")
Output:
Experiment- 3
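The MP neuron can also realise NOR: with inhibitory weights of -1 on both inputs and threshold T = 0, the neuron fires only for the input (0, 0).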
import numpy as np
input_table = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])  # NOR truth-table inputs
dot = input_table @ np.array([-1, -1])  # inhibitory weights of -1 on both inputs
def AF(x, T):
    # MP step activation: fire once the net input reaches the threshold
    return 1 if x >= T else 0
T = 0
for i in range(0, 4):
    output = AF(dot[i], T)
    print(f"Output: {output} for input: {input_table[i]}")
Output:
Experiment- 4
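Hebbian learning strengthens connections between units that are active together: for every bipolar training pair, the updates are w1 = w1 + x1*t, w2 = w2 + x2*t and b = b + t. Here the rule is trained on the bipolar truth table of AND.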
import numpy as np
input_table = np.array([[-1, -1, -1], [-1, 1, -1], [1, -1, -1], [1, 1, 1]])  # bipolar AND: (x1, x2, target)
def hebbian(input_table):
    w1, w2, b = 0, 0, 0
    for x1, x2, t in input_table:
        # Hebb update: each weight grows by input times target
        w1 += x1 * t
        w2 += x2 * t
        b += t
        print(f"x1:{x1}, x2:{x2}, y:{t}, w1:{w1}, w2:{w2}, b:{b}")
hebbian(input_table)
Output:
Experiment- 5
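The same Hebb rule is now trained on ANDNOT (x1 AND NOT x2), whose bipolar truth table has target 1 only for the input (1, -1).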
import numpy as np
input_table = np.array([[-1, -1, -1], [-1, 1, -1], [1, -1, 1], [1, 1, -1]])  # bipolar ANDNOT: (x1, x2, target)
def hebbian(input_table):
    w1, w2, b = 0, 0, 0
    for x1, x2, t in input_table:
        w1 += x1 * t
        w2 += x2 * t
        b += t
        print(f"x1:{x1}, x2:{x2}, y:{t}, w1:{w1}, w2:{w2}, b:{b}")
hebbian(input_table)
Output:
Experiment- 6
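The perceptron learning rule changes the weights only when the computed output disagrees with the target: if y differs from t, then w1 = w1 + x1*t, w2 = w2 + x2*t and b = b + t; otherwise nothing is updated. Here it is trained on the bipolar OR truth table.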
import numpy as np
input_table = np.array([[-1, -1, -1], [-1, 1, 1], [1, -1, 1], [1, 1, 1]])  # bipolar OR: (x1, x2, target)
def f(x, theta):
    # Bipolar step activation with an undecided band [-theta, theta]
    if x > theta:
        return 1
    elif x >= -theta:
        return 0
    else:
        return -1
def percep(input_table, theta):
    w1, w2, b = 0, 0, 0
    for x1, x2, t in input_table:
        yin = x1 * w1 + x2 * w2 + b
        y = f(yin, theta)
        if t != y:  # update only when the output is wrong
            w1 += x1 * t
            w2 += x2 * t
            b += t
        print(f"x1:{x1}, x2:{x2}, y:{t}, w1:{w1}, w2:{w2}, b:{b}")
theta = 0.5
percep(input_table, theta)
Output:
Experiment- 7
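The same perceptron rule and step activation are trained on the bipolar AND truth table, whose target is 1 only for the input (1, 1).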
import numpy as np
input_table = np.array([[-1, -1, -1], [-1, 1, -1], [1, -1, -1], [1, 1, 1]])  # bipolar AND: (x1, x2, target)
def f(x, theta):
    # Bipolar step activation with an undecided band [-theta, theta]
    if x > theta:
        return 1
    elif x >= -theta:
        return 0
    else:
        return -1
def percep(input_table, theta):
    w1, w2, b = 0, 0, 0
    for x1, x2, t in input_table:
        yin = x1 * w1 + x2 * w2 + b
        y = f(yin, theta)
        if t != y:  # update only when the output is wrong
            w1 += x1 * t
            w2 += x2 * t
            b += t
        print(f"x1:{x1}, x2:{x2}, y:{t}, w1:{w1}, w2:{w2}, b:{b}")
theta = 0.5
percep(input_table, theta)
Output:
Experiment- 8
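Adaline (adaptive linear neuron) learns with the delta (LMS) rule: the weights follow the negative gradient of the squared error of the linear output, w = w + eta * (t - y_in) * x, and the step function is applied only when predicting.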
import numpy as np
class Adaline:
    def __init__(self, learning_rate=0.1, epochs=100, random_state=1):
        self.learning_rate = learning_rate
        self.epochs = epochs
        self.random_state = random_state
    def fit(self, X, y):
        # Small random initial weights; w_[0] is the bias
        self.w_ = np.random.RandomState(self.random_state).normal(
            loc=0.0, scale=0.01, size=1 + X.shape[1])
        self.cost_ = []
        for _ in range(self.epochs):
            net_input = self.net_input(X)
            output = self.activation(net_input)
            errors = y - output
            # Delta rule: batch gradient descent on the summed squared error
            self.w_[1:] += self.learning_rate * X.T.dot(errors)
            self.w_[0] += self.learning_rate * errors.sum()
            cost = (errors ** 2).sum() / 2.0
            self.cost_.append(cost)
        return self
    def net_input(self, X):
        return np.dot(X, self.w_[1:]) + self.w_[0]
    def activation(self, X):
        # Adaline's activation is the identity; learning uses the linear output
        return X
    def predict(self, X):
        # Threshold at 0.5 since the targets are 0/1 (0.0 would misclassify [0, 0])
        return np.where(self.net_input(X) >= 0.5, 1, 0)
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([0, 1, 1, 1])  # OR targets
adaline = Adaline(learning_rate=0.1, epochs=100)
adaline.fit(X, y)
new_data = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
predictions = adaline.predict(new_data)
print("Predictions: ", predictions)
Output:
Experiment- 9
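Backpropagation computes the gradient of the loss with respect to every weight by applying the chain rule layer by layer, from the output back towards the input. Here Keras builds the network and performs the backward pass automatically during fit.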
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD
import numpy as np
# Dataset (assumed, since the file does not define X and y): XOR
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([0, 1, 1, 0])
model = Sequential()
model.add(Dense(units=2, input_dim=2, activation='sigmoid'))
model.add(Dense(units=4, activation='sigmoid'))
model.add(Dense(units=1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer=SGD(learning_rate=0.1), metrics=['accuracy'])
model.fit(X, y, epochs=10000, batch_size=1, verbose=0)
loss, accuracy = model.evaluate(X, y)
print("Loss: {:.4f}, Accuracy: {:.4f}".format(loss, accuracy))
Output:
Experiment- 10
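A heteroassociative memory stores pairs of different input and output patterns in one weight matrix, here built with the Hebbian outer-product rule W = X^T Y; recall presents an input vector, computes its product with W and thresholds the result.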
import numpy as np
class HeteroassociativeMemory:
    def __init__(self, activation_function='sign', threshold=0.5):
        self.activation_function = activation_function
        self.threshold = threshold
    def fit(self, X, Y):
        self.W = np.dot(X.T, Y)
    def recall(self, X):
        net_input = np.dot(X, self.W)
        if self.activation_function == 'sign':
            return np.where(net_input >= self.threshold, 1, -1)
        elif self.activation_function == 'binary':
            return np.where(net_input >= self.threshold, 1, 0)
        else:
            raise ValueError(
                "Invalid activation function. Choose 'sign' or 'binary'.")
X = np.array([[1, 1, 0, 0],
              [1, 0, 1, 0],
              [0, 0, 1, 1]])
Y = np.array([[1, 0, 0, 0],
              [0, 1, 0, 0],
              [0, 0, 0, 1]])
heteroassoc = HeteroassociativeMemory(
    activation_function='sign', threshold=0.5)
heteroassoc.fit(X, Y)
recall_X = np.array([[1, 0, 0, 0],
                     [0, 1, 0, 0],
                     [0, 0, 0, 1]])
recall_Y = heteroassoc.recall(recall_X)
print("Recalled patterns: \n", recall_Y)
Output:
Experiment- 11
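A bidirectional associative memory (BAM) also stores pattern pairs with an outer-product weight matrix, but recall can run in either direction: from the X layer to the Y layer, and back again.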
import numpy as np
class BAM:
    def fit(self, X, Y):
        self.W = np.dot(Y.T, X)
    def recall(self, X):
        Y = np.dot(X, self.W.T)
        Y[Y < 0] = 0
        Y[Y > 0] = 1
        return Y
X = np.array([[1, 1, 0],
              [1, 0, 1],
              [0, 0, 1]])
Y = np.array([[1, 0, 0, 1],
              [0, 1, 0, 1],
              [0, 0, 1, 1]])
bam = BAM()
bam.fit(X, Y)
recall_X = np.array([[1, 0, 0],
                     [0, 1, 0],
                     [0, 0, 0]])
recall_Y = bam.recall(recall_X)
print("Recalled patterns: \n", recall_Y)
Output:
Experiment- 12
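A Hopfield network is a recurrent auto-associative memory: one layer of units joined by symmetric weights with no self-connections, which settles from a noisy cue into the closest stored pattern.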
import numpy as np
class HopfieldNetwork:
    def __init__(self, n_units):
        self.n_units = n_units
        self.weights = np.zeros((n_units, n_units))
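    # The file breaks off after the constructor. The methods and demo below are
    # a minimal completion sketch (the names train/recall and the example
    # pattern are illustrative): Hebbian outer-product storage of bipolar
    # patterns with synchronous sign-threshold recall.
    def train(self, patterns):
        # Sum the outer products of the stored patterns; no self-connections
        for p in patterns:
            self.weights += np.outer(p, p)
        np.fill_diagonal(self.weights, 0)
    def recall(self, pattern, steps=5):
        # Repeatedly apply the sign of the net input until the state settles
        state = np.array(pattern)
        for _ in range(steps):
            state = np.where(self.weights @ state >= 0, 1, -1)
        return state

pattern = np.array([1, -1, 1, -1])  # stored bipolar pattern
hopfield = HopfieldNetwork(4)
hopfield.train([pattern])
noisy = np.array([1, 1, 1, -1])     # cue with one flipped bit
print("Recalled pattern:", hopfield.recall(noisy))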
Output: