AAM pr-9
CODE:
import numpy as np
# Sigmoid activation and its derivative (the derivative takes the sigmoid's output, not its input)
def sigmoid(x):
    return 1 / (1 + np.exp(-x))
def sigmoid_derivative(x):
    return x * (1 - x)
# XOR training data (assumed from the 2-2-1 architecture and the four outputs below)
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])
input_layer_neurons, hidden_layer_neurons, output_neurons = 2, 2, 1
lr = 0.5  # learning rate (assumed; not shown in the original snippet)
# Weights
wh = np.random.uniform(size=(input_layer_neurons, hidden_layer_neurons))  # 2x2
bh = np.random.uniform(size=(1, hidden_layer_neurons))                    # 1x2
wo = np.random.uniform(size=(hidden_layer_neurons, output_neurons))       # 2x1
bo = np.random.uniform(size=(1, output_neurons))                          # 1x1
# Training loop
for epoch in range(10000):
    # Forward pass
    hidden_output = sigmoid(X.dot(wh) + bh)
    output = sigmoid(hidden_output.dot(wo) + bo)
    # Backpropagation of the error
    d_output = (y - output) * sigmoid_derivative(output)
    error_hidden = d_output.dot(wo.T)
    d_hidden = error_hidden * sigmoid_derivative(hidden_output)
    # Update weights and biases
    wo += hidden_output.T.dot(d_output) * lr
    bo += d_output.sum(axis=0, keepdims=True) * lr
    wh += X.T.dot(d_hidden) * lr
    bh += d_hidden.sum(axis=0, keepdims=True) * lr
print(output)
OUTPUT:
[[0.06368082]
[0.94085536]
[0.94108726]
[0.06402009]]
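The four predictions line up with the XOR targets 0, 1, 1, 0: the first and last rows settle near 0 and the middle two near 1 (exact values vary with the random initial weights). A minimal sanity check, assuming the `output` and `y` arrays left by the training loop above, rounds the predictions and compares them to the targets:

# Continues from the script above: `output` and `y` are assumed to exist
preds = np.round(output).astype(int)
print(preds.ravel())             # expected: [0 1 1 0]
print(np.array_equal(preds, y))  # True once training has converged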