Ajmal Code

Code for Assignment



import numpy as np

# Sigmoid activation and its derivative. Note that sigmoid_derivative expects
# a value that has already been passed through sigmoid, since
# d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x)).
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def sigmoid_derivative(x):
    return x * (1 - x)

class MLP:
    """A 2-2-1 multilayer perceptron trained with plain backpropagation."""

    def __init__(self):
        # Random initialization in [0, 1): 2 inputs -> 2 hidden units -> 1 output.
        self.input_weights = np.random.rand(2, 2)
        self.hidden_weights = np.random.rand(2, 1)
        self.hidden_bias = np.random.rand(1, 2)
        self.output_bias = np.random.rand(1, 1)

    def forward(self, inputs):
        # Forward pass; intermediate activations are cached for backpropagation.
        self.hidden_layer_input = np.dot(inputs, self.input_weights) + self.hidden_bias
        self.hidden_layer_output = sigmoid(self.hidden_layer_input)
        self.output_layer_input = np.dot(self.hidden_layer_output, self.hidden_weights) + self.output_bias
        self.output = sigmoid(self.output_layer_input)
        return self.output

    def train(self, inputs, outputs, epochs=10000, learning_rate=0.1):
        # Full-batch gradient descent on the squared error.
        for epoch in range(epochs):
            self.forward(inputs)
            error = outputs - self.output
            # Backpropagate the error through the output and hidden layers.
            output_gradient = error * sigmoid_derivative(self.output)
            hidden_gradient = output_gradient.dot(self.hidden_weights.T) * sigmoid_derivative(self.hidden_layer_output)
            # Since error is target minus prediction, adding these terms
            # moves the weights downhill on the loss.
            self.hidden_weights += self.hidden_layer_output.T.dot(output_gradient) * learning_rate
            self.output_bias += np.sum(output_gradient, axis=0) * learning_rate
            self.input_weights += inputs.T.dot(hidden_gradient) * learning_rate
            self.hidden_bias += np.sum(hidden_gradient, axis=0) * learning_rate

# Truth-table inputs and targets for the three gates.
inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])

outputs_and = np.array([[0], [0], [0], [1]])
outputs_or = np.array([[0], [1], [1], [1]])
outputs_xor = np.array([[0], [1], [1], [0]])

# Train a fresh network per gate and print the rounded predictions.
for gate, outputs in zip(["AND", "OR", "XOR"], [outputs_and, outputs_or, outputs_xor]):
    mlp = MLP()
    mlp.train(inputs, outputs)
    print(f"\n{gate} Gate Results:")
    for i in inputs:
        print(f"Input: {i}, Output: {np.round(mlp.forward(i))}")

AND Gate Results:
Input: [0 0], Output: [[0.]]
Input: [0 1], Output: [[0.]]
Input: [1 0], Output: [[0.]]
Input: [1 1], Output: [[1.]]

OR Gate Results:
Input: [0 0], Output: [[0.]]
Input: [0 1], Output: [[1.]]
Input: [1 0], Output: [[1.]]
Input: [1 1], Output: [[1.]]

XOR Gate Results:
Input: [0 0], Output: [[0.]]
Input: [0 1], Output: [[1.]]
Input: [1 0], Output: [[1.]]
Input: [1 1], Output: [[0.]]
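
Worth noting: AND and OR are linearly separable, but XOR is not, which is why the hidden layer is essential to the XOR result above. Also, forward() already handles the whole (4, 2) truth table as one matrix (train() calls it that way), so the per-row loop can be replaced by a single batch call. A small sketch, reusing the mlp left over from the loop (the XOR-trained network):

# Batch evaluation sketch: score the full truth table in one forward call.
predictions = np.round(mlp.forward(inputs))   # shape (4, 1), one rounded prediction per row
print(predictions.ravel())                    # expected [0. 1. 1. 0.] for a converged XOR run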

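Because the weights are initialized randomly, the XOR run in particular may occasionally converge slowly or stall. A minimal convergence check, assuming the MLP class and arrays defined above; the seed value and reporting interval are arbitrary choices, not part of the original notebook:

np.random.seed(0)   # hypothetical seed for a repeatable run
mlp = MLP()
for chunk in range(5):
    mlp.train(inputs, outputs_xor, epochs=2000)   # 2,000 updates per chunk
    mse = np.mean((outputs_xor - mlp.forward(inputs)) ** 2)
    print(f"after {(chunk + 1) * 2000} epochs: MSE = {mse:.4f}")

A steadily shrinking MSE indicates the run is on track; if it plateaus well above zero, re-running with a different seed is the simplest fix.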

