AML Lab Assignment 9


import numpy as np

X = np.array(([2, 9], [1, 5], [3, 6]), dtype=float)  # X = (hours sleeping, hours studying)
y = np.array(([92], [86], [89]), dtype=float)        # y = score on test

# scale units
X = X / np.amax(X, axis=0)  # divide each feature by its column maximum
y = y / 100                 # max test score is 100
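# Note: scaling keeps every input feature and the target in [0, 1], which suits
# the sigmoid activation used below, since sigmoid saturates for large inputs.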

class Neural_Network(object):
    def __init__(self):
        # Parameters
        self.inputSize = 2
        self.outputSize = 1
        self.hiddenSize = 3
        # Weights
        self.W1 = np.random.randn(self.inputSize, self.hiddenSize)   # (2x3) weight matrix from input to hidden layer
        self.W2 = np.random.randn(self.hiddenSize, self.outputSize)  # (3x1) weight matrix from hidden to output layer

    def forward(self, X):
        # forward propagation through our network
        self.z = np.dot(X, self.W1)         # dot product of X (input) and first set of weights
        self.z2 = self.sigmoid(self.z)      # activation function
        self.z3 = np.dot(self.z2, self.W2)  # dot product of hidden layer (z2) and second set of weights
        o = self.sigmoid(self.z3)           # final activation function
        return o

    def sigmoid(self, s):
        return 1 / (1 + np.exp(-s))  # activation function

    def sigmoidPrime(self, s):
        return s * (1 - s)  # derivative of sigmoid
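    # Chain-rule note: with s = sigmoid(z), ds/dz = sigmoid(z) * (1 - sigmoid(z))
    # = s * (1 - s), so sigmoidPrime expects values that are already sigmoid
    # outputs (o and self.z2 in backward below), not raw pre-activations.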

    def backward(self, X, y, o):
        # backward propagate through the network
        self.o_error = y - o                                         # error in output
        self.o_delta = self.o_error * self.sigmoidPrime(o)           # applying derivative of sigmoid to output error
        self.z2_error = self.o_delta.dot(self.W2.T)                  # z2 error: how much our hidden layer contributed to the output error
        self.z2_delta = self.z2_error * self.sigmoidPrime(self.z2)   # applying derivative of sigmoid to z2 error
        self.W1 += X.T.dot(self.z2_delta)       # adjusting first set of weights (input --> hidden)
        self.W2 += self.z2.T.dot(self.o_delta)  # adjusting second set of weights (hidden --> output)
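        # Note: these updates take a full gradient step (implicit learning rate
        # of 1), matching the assignment's code; a common variation scales the
        # deltas by a small rate, e.g. self.W1 += 0.1 * X.T.dot(self.z2_delta).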

    def train(self, X, y):
        o = self.forward(X)
        self.backward(X, y, o)


NN = Neural_Network()
for i in range(1000):  # trains the NN 1,000 times
    print("\nInput: \n" + str(X))
    print("\nActual Output: \n" + str(y))
    print("\nPredicted Output: \n" + str(NN.forward(X)))
    print("\nLoss: \n" + str(np.mean(np.square(y - NN.forward(X)))))  # mean squared error
    NN.train(X, y)
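
After training, the network can be asked for a prediction on an unseen input. The snippet below is a minimal sketch that is not part of the original assignment: the new student's hours (xPredicted) are made-up values, and because X was scaled in place above, the original column maxima of the training data ([3, 9]) are written out by hand here so the same scaling can be applied.

# Minimal usage sketch (not in the original assignment)
X_max = np.array([3.0, 9.0], dtype=float)     # column maxima of the unscaled training X
xPredicted = np.array(([2, 8]), dtype=float)  # hypothetical (hours sleeping, hours studying)
xPredicted = xPredicted / X_max               # apply the same scaling as the training data
print("\nPredicted score (fraction of 100): \n" + str(NN.forward(xPredicted)))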
