Lesson 3
class MulLayer:
    def __init__(self):
        self.a = None
        self.b = None

    def forward(self, a, b):
        self.a = a
        self.b = b
        output = a * b
        return output

    def backward(self, div_output):
        # Multiplication swaps its inputs in the backward pass
        da = div_output * self.b
        db = div_output * self.a
        return da, db
an_apple_price = 100
apple_num = 2
consumption_tax = 1.1
# Layer
multiplication_apple_layer = MulLayer()
multiplication_tax_layer = MulLayer()
# Forward propagation
total_apple_price = multiplication_apple_layer.forward(an_apple_price, apple_num)
total_cost = multiplication_tax_layer.forward(total_apple_price, consumption_tax)

print(total_cost)  # 220
220.00000000000003
# Backward propagation
div_price_output = 1
div_total_apple_price, div_tax = multiplication_tax_layer.backward(div_price_output)
div_an_apple_price, div_apple_num = multiplication_apple_layer.backward(div_total_apple_price)
print(div_an_apple_price, div_apple_num, div_tax) # 2.2 110 200
2.2 110.00000000000001 200
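The stray trailing digits in the printed results (220.00000000000003, 110.00000000000001) are ordinary floating-point rounding from multiplying by 1.1. As a sanity check on the backward pass, the analytic gradient can be compared against a numerical one; a minimal sketch reusing the variables above:

h = 1e-4
mul_apple, mul_tax = MulLayer(), MulLayer()
plus = mul_tax.forward(mul_apple.forward(an_apple_price + h, apple_num), consumption_tax)
minus = mul_tax.forward(mul_apple.forward(an_apple_price - h, apple_num), consumption_tax)
print((plus - minus) / (2 * h))  # ~2.2, matching div_an_apple_price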
class AddLayer:
    def __init__(self):
        pass  # addition keeps no state

    def forward(self, a, b):
        output = a + b
        return output

    def backward(self, div_output):
        # Addition passes the upstream gradient through unchanged
        da = div_output * 1
        db = div_output * 1
        return da, db
an_apple_price = 100
apple_num = 2
an_orange_price = 150
orange_num = 3
consumption_tax = 1.1
# Layer
multiplication_apple_layer = MulLayer()
multiplication_orange_layer = MulLayer()
addition_apple_orange_layer = AddLayer()
multiplication_tax_layer = MulLayer()
# Forward propagation
total_apple_price = multiplication_apple_layer.forward(an_apple_price, apple_num)  # (1)
total_orange_price = multiplication_orange_layer.forward(an_orange_price, orange_num)  # (2)
total_cost_bef_tax = addition_apple_orange_layer.forward(total_apple_price, total_orange_price)  # (3)
total_cost_aft_tax = multiplication_tax_layer.forward(total_cost_bef_tax, consumption_tax)  # (4)
# Backward propagation
div_price = 1
div_total_cost_bef_tax, div_tax = multiplication_tax_layer.backward(div_price)  # (4)
div_total_apple_price, div_total_orange_price = addition_apple_orange_layer.backward(div_total_cost_bef_tax)  # (3)
div_an_orange_price, div_orange_num = multiplication_orange_layer.backward(div_total_orange_price)  # (2)
div_an_apple_price, div_apple_num = multiplication_apple_layer.backward(div_total_apple_price)  # (1)

print(total_cost_aft_tax)  # 715
print(div_apple_num, div_an_apple_price, div_an_orange_price,
      div_orange_num, div_total_cost_bef_tax, div_tax)  # 110 2.2 3.3 165 1.1 650
715.0000000000001
110.00000000000001 2.2 3.3000000000000003 165.0 1.1 650
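Note how the two node types split the work in the backward pass: each multiplication node swaps its inputs, while the addition node routes div_total_cost_bef_tax (1.1) to both branches unchanged. A one-line illustration of the addition behaviour, using the same values as above:

add = AddLayer()
_ = add.forward(200.0, 450.0)  # apple total, orange total
print(add.backward(1.1))       # (1.1, 1.1): routed to both branches unchanged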
ReLU Layer
class Relu:
    def __init__(self):
        self.mask = None

    def forward(self, a):
        # Remember which inputs were <= 0 and zero them in the output
        self.mask = (a <= 0)
        output = a.copy()
        output[self.mask] = 0
        return output

    def backward(self, div_output):
        # Block the gradient wherever the forward input was <= 0
        div_output[self.mask] = 0
        div_a = div_output
        return div_a
import numpy as np
a = np.array([[2.0, -0.6], [-3.0, 4.0]])
print(a)
mask = (a <= 0)
print(mask)
[[ 2.  -0.6]
 [-3.   4. ]]
[[False  True]
 [ True False]]
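Feeding the same array through the Relu layer defined above ties the mask to the layer's behaviour (a small sketch):

relu = Relu()
print(relu.forward(a))                 # [[2. 0.] [0. 4.]]: negative entries zeroed
print(relu.backward(np.ones_like(a)))  # [[1. 0.] [0. 1.]]: gradient blocked where masked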
Sigmoid Layer
class SigMoid:
    def __init__(self):
        self.output = None

    def forward(self, a):
        output = 1 / (1 + np.exp(-a))
        self.output = output
        return output

    def backward(self, div_output):
        # dy/dx = y * (1 - y), using the output saved during forward
        div_a = div_output * (1.0 - self.output) * self.output
        return div_a
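At a = 0 the sigmoid outputs 0.5, so by the identity dy/dx = y(1 - y) the gradient there should be 0.25. A quick check:

sig = SigMoid()
print(sig.forward(np.array([0.0])))   # [0.5]
print(sig.backward(np.array([1.0])))  # [0.25] = 0.5 * (1 - 0.5)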
X_input.shape # (2,)
W_weight.shape # (2,3)
B_bias.shape # (3,)
X_dot_W = np.array([[1, 1, 1], [11, 11, 11]])
B = np.array([2, 4, 6])

X_dot_W
array([[ 1,  1,  1],
       [11, 11, 11]])
X_dot_W + B
array([[ 3,  5,  7],
       [13, 15, 17]])
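Because B is broadcast across the batch dimension in the forward pass, its gradient must sum over that axis in the backward pass. A sketch with a hypothetical upstream gradient:

div_output = np.array([[1, 2, 3], [4, 5, 6]])
div_B = np.sum(div_output, axis=0)
print(div_B)  # [5 7 9]: one gradient entry per bias element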
class Affine:
    def __init__(self, W, b):
        self.W = W
        self.b = b
        self.x = None
        self.div_W = None
        self.div_b = None

    def forward(self, x):
        self.x = x
        output = np.dot(x, self.W) + self.b
        return output

    def backward(self, div_output):
        div_x = np.dot(div_output, self.W.T)
        self.div_W = np.dot(self.x.T, div_output)
        self.div_b = np.sum(div_output, axis=0)  # sum over the batch axis
        return div_x
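A quick shape check with arbitrary values: for a batch of four 2-dimensional inputs, the gradients come back with the same shapes as x, W, and b.

W = np.random.randn(2, 3)
b = np.zeros(3)
affine = Affine(W, b)
x = np.random.randn(4, 2)
out = affine.forward(x)
div_x = affine.backward(np.ones((4, 3)))
print(out.shape, div_x.shape, affine.div_W.shape, affine.div_b.shape)
# (4, 3) (4, 2) (2, 3) (3,)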
Softmax-with-Loss Layer
from common.functions import softmax, cross_entropy_error

class SoftMaxWithLoss:
    def __init__(self):
        self.loss = None  # Loss
        self.y = None     # Output of softmax
        self.t = None     # Label data (one-hot vector)

    def forward(self, x, t):
        self.t = t
        self.y = softmax(x)
        self.loss = cross_entropy_error(self.y, self.t)
        return self.loss

    def backward(self, div_output=1):
        # Combined softmax + cross-entropy gradient: (y - t) / batch_size
        batch_size = self.t.shape[0]
        div_x = (self.y - self.t) / batch_size
        return div_x
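A small worked example, assuming the softmax and cross_entropy_error helpers from the book's common/functions module are importable: for a single sample whose correct class already scores highest, the loss is small and the gradient is simply (y - t) / 1.

x = np.array([[0.3, 2.9, 4.0]])
t = np.array([[0.0, 0.0, 1.0]])
swl = SoftMaxWithLoss()
print(swl.forward(x, t))  # small loss, since class 2 scores highest
print(swl.backward())     # (y - t) / batch_size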
Implementing Backpropagation
Implementing a Neural Network That Supports Backpropagation
import sys, os
sys.path.append(os.pardir)
import numpy as np
from common.layers import *
from common.gradient import numerical_gradient
from collections import OrderedDict
class TwoLayerNet:
    def __init__(self, input_size, hidden_size, output_size,
                 weight_init_std=0.01):
        # Initialize weights
        self.params = {}
        self.params['W1'] = weight_init_std * \
                            np.random.randn(input_size, hidden_size)
        self.params['b1'] = np.zeros(hidden_size)
        self.params['W2'] = weight_init_std * \
                            np.random.randn(hidden_size, output_size)
        self.params['b2'] = np.zeros(output_size)

        # Create layers