ML Assignment

This document contains Assignment 2 by Shehroz Khan: Python code for min-max feature scaling and linear-regression gradient descent using both batch and stochastic updates. It defines functions for scaling the data, computing the cost, and running gradient descent, and it prints the final weights and costs after 1000 iterations. The code uses NumPy for numerical operations and Python's random module for stochastic sampling.


NAME: SHEHROZ KHAN

REG NO: BCS221054

ASSIGNMENT NO 2
SCREENSHOTS / OUTPUT: [screenshots of the program output omitted from this copy]
CODE:
import numpy as np
import random

# Dataset: each row is one sample; the first two columns are features and the
# last column is the regression target.
data = np.array([
    [64.7, 163, 89.6],
    [54, 153, 85.5],
    [61.4, 165, 70.1],
    [74, 171, 91.1],
    [63.6, 159, 74.3],
    [64.6, 156, 85.5],
    [81.5, 169, 95.6],
    [64.3, 169, 79.5],
    [77.7, 167, 100],
    [95.4, 178, 103.2],
    [62.6, 176, 73.8],
    [67.3, 152, 96.7],
    [54.2, 162, 75],
    [60.9, 170, 79]
])

# Min-max scaling: rescale every column into the [0, 1] range.
def fscaling(data):
    min_val = data.min(axis=0)
    max_val = data.max(axis=0)
    return (data - min_val) / (max_val - min_val)

scaled_data = fscaling(data)

# Mean-squared-error cost: J(w) = (1 / (2m)) * sum((X.w - y)^2).
def compute_cost(X, y, weights):
    m = len(y)
    predictions = X.dot(weights)
    cost = (1 / (2 * m)) * np.sum((predictions - y) ** 2)
    return cost
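# The gradient of this cost is (1/m) * X.T.dot(X.dot(weights) - y); the batch
# loop below applies it over all samples, while the stochastic version applies
# a single-sample estimate of it.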

# Full-batch update loop: each iteration steps along the gradient computed
# over all m samples and records the resulting cost.
def gradient_descent(X, y, weights, learning_rate, iterations):
    m = len(y)
    cost_history = []
    for i in range(iterations):
        predictions = X.dot(weights)
        gradients = (1 / m) * X.T.dot(predictions - y)
        weights -= learning_rate * gradients
        cost = compute_cost(X, y, weights)
        cost_history.append(cost)
    return weights, cost_history

# Batch gradient descent: prepend a bias column of ones, split features from
# the target, initialize zero weights, and run the full-batch loop above.
def b_gradient_descent(data, learning_rate=0.01, iterations=1000):
    X = np.c_[np.ones(data.shape[0]), data[:, :-1]]
    y = data[:, -1]
    weights = np.zeros(X.shape[1])
    weights, cost_history = gradient_descent(X, y, weights, learning_rate, iterations)
    return weights, cost_history

# Stochastic gradient descent: each iteration updates the weights using one
# randomly chosen sample, while the cost is still tracked over the full dataset.
def sgd_gradient_descent(data, learning_rate=0.01, iterations=1000):
    X = np.c_[np.ones(data.shape[0]), data[:, :-1]]
    y = data[:, -1]
    weights = np.zeros(X.shape[1])
    cost_history = []
    for i in range(iterations):
        random_index = random.randint(0, len(y) - 1)
        x_i = X[random_index:random_index + 1]
        y_i = y[random_index:random_index + 1]
        predictions = x_i.dot(weights)
        gradients = x_i.T.dot(predictions - y_i)
        weights -= learning_rate * gradients
        cost = compute_cost(X, y, weights)
        cost_history.append(cost)
    return weights, cost_history

print(" Gradient Descent")

weights_b, cost_history_b = b_gradient_descent(scaled_data)

print("Final Weights:", weights_batch)

print("Final Cost after 1000 iterations:", cost_history_batch[-1])

print("\n sgd Gradient Descent")

weights_sgd, cost_history_sgd = sgd_gradient_descent(scaled_data)

print("Final Weights:", weights_sgd)

print("Final Cost after 1000 iterations:", cost_history_sgd[-1])
