DL Lab 4

U21EC082 - Jinhal Maheshwari

In [1]: # Aim: To study and implement simple linear regression with and without using library functions.

In [2]: import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

In [3]: data = pd.read_csv(r"C:\Users\aksha\Downloads\detaset.csv")
data

Out[3]:          x          y
        0     0.00  -1.179169
        1     0.01   0.515011
        2     0.02  -0.047497
        3     0.03   0.160731
        4     0.04   0.704209
        ...    ...        ...
        996   9.96  21.899947
        997   9.97  21.150909
        998   9.98  19.900154
        999   9.99  22.883209
        1000 10.00  19.949295

        1001 rows × 2 columns
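The CSV read above is a local file, so the exact data cannot be reproduced from this notebook alone. For anyone re-running the lab, a similar dataset can be generated synthetically with the sketch below; the slope (≈2), intercept (≈0.4), and unit-variance noise are assumptions inferred from the fitted values reported at the end of the notebook, and the filename dataset_synthetic.csv is hypothetical.

import numpy as np
import pandas as pd

# Hypothetical stand-in for the local detaset.csv; slope, intercept, and noise
# level are assumptions based on the gradient-descent results later in the lab.
rng = np.random.default_rng(0)
x = np.linspace(0.0, 10.0, 1001)                         # 0.00, 0.01, ..., 10.00
y = 2.0 * x + 0.4 + rng.normal(0.0, 1.0, size=x.size)    # y ≈ 2x + 0.4 + noise
pd.DataFrame({"x": x, "y": y}).to_csv("dataset_synthetic.csv", index=False)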

In [4]: x = data.iloc[:, 0].values
y = data.iloc[:, -1].values

In [5]: x

Out[5]: array([ 0. , 0.01, 0.02, ..., 9.98, 9.99, 10. ])

In [6]: y

Out[6]: array([-1.17916881,  0.51501056, -0.04749717, ..., 19.90015398,
                22.88320905, 19.94929548])

In [7]: plt.scatter(x,y)

Out[7]: <matplotlib.collections.PathCollection at 0x1ff82fc4810>


In [8]: from sklearn.model_selection import train_test_split

x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=42)  # random_state value truncated in the source; 42 assumed

In [9]: # MSE for a fixed parameter guess (w = 2, b = 1)
w = 2
b = 1
mmse = 0
y_pred = np.zeros(len(x))
for i in range(len(x)):
    y_pred[i] = w * x[i] + b
    mmse += (y_pred[i] - y[i]) ** 2

mmse = mmse / len(y_pred)
mmse

# Gradients of the MSE with respect to w and b
def compute_gradients(x, y, w, b):
    m = len(y)
    y_pred = w * x + b
    dw = (2 / m) * np.sum((y_pred - y) * x)
    db = (2 / m) * np.sum(y_pred - y)
    return dw, db
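For reference, the gradient expressions used in compute_gradients and in the training loop below are the standard partial derivatives of the mean-squared-error loss with respect to w and b:

\[
L(w,b) = \frac{1}{m}\sum_{i=1}^{m}\bigl(y_i - (w x_i + b)\bigr)^2,\qquad
\frac{\partial L}{\partial w} = -\frac{2}{m}\sum_{i=1}^{m} x_i\bigl(y_i - (w x_i + b)\bigr),\qquad
\frac{\partial L}{\partial b} = -\frac{2}{m}\sum_{i=1}^{m}\bigl(y_i - (w x_i + b)\bigr).
\]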

In [10]: def train_linear_regression(x, y, learning_rate, epochs):
    # Step 1: Initialize weights and bias
    w = 0.0
    b = 0.0

    # Step 2: Calculate the number of samples
    m = len(y)

    # Step 3: Training loop
    for epoch in range(epochs):
        # Step 4: Calculate predictions
        y_pred = w * x + b

        # Step 5: Calculate loss (Mean Squared Error)
        mse = (1 / m) * np.sum((y - y_pred) ** 2)

        # Step 6: Calculate gradients
        dw = -(2 / m) * np.sum(x * (y - y_pred))
        db = -(2 / m) * np.sum(y - y_pred)

        # Step 7: Update weights and bias
        w -= learning_rate * dw
        b -= learning_rate * db

        # Observe loss for each epoch
        print(f"Epoch {epoch+1}/{epochs}, Loss: {mse:.4f}, w: {w:.4f}, b: {b:.4f}")

    # Step 8: Print the final values of w and b
    print(f"\nFinal values: w = {w:.4f}, b = {b:.4f}")

    return w, b

In [11]: def predict(x, w, b):
    return w * x + b

w, b = train_linear_regression(x_train, y_train, learning_rate=0.01, epochs=100)

# Predict values for the training and testing datasets
y_train_pred = predict(x_train, w, b)
y_test_pred = predict(x_test, w, b)

plt.figure(figsize=(10, 6))
# Plot training data and prediction line
plt.scatter(x_train, y_train, color='purple', s=10, label='Training Data')
plt.plot(x_train, y_train_pred, color='black', label='Training Fit Line')
# Plot test data and prediction line
plt.scatter(x_test, y_test, color='pink', s=10, label='Test Data')
plt.plot(x_test, y_test_pred, color='yellow', label='Test Fit Line')
# Plot settings
plt.title('Linear Regression on Training and Test Data')
plt.xlabel('x-axis')
plt.ylabel('y-axis')
plt.grid(True)
plt.legend()
plt.show()
Epoch 1/100, Loss: 140.4707, w: 1.3480, b: 0.2065
Epoch 2/100, Loss: 16.4519, w: 1.7978, b: 0.2762
Epoch 3/100, Loss: 2.6355, w: 1.9478, b: 0.3003
Epoch 4/100, Loss: 1.0962, w: 1.9977, b: 0.3091
Epoch 5/100, Loss: 0.9246, w: 2.0143, b: 0.3127
Epoch 6/100, Loss: 0.9054, w: 2.0197, b: 0.3147
Epoch 7/100, Loss: 0.9031, w: 2.0214, b: 0.3161
Epoch 8/100, Loss: 0.9027, w: 2.0218, b: 0.3173
Epoch 9/100, Loss: 0.9026, w: 2.0219, b: 0.3185
Epoch 10/100, Loss: 0.9024, w: 2.0218, b: 0.3196
Epoch 11/100, Loss: 0.9023, w: 2.0216, b: 0.3207
Epoch 12/100, Loss: 0.9022, w: 2.0215, b: 0.3218
Epoch 13/100, Loss: 0.9021, w: 2.0213, b: 0.3229
Epoch 14/100, Loss: 0.9019, w: 2.0212, b: 0.3240
Epoch 15/100, Loss: 0.9018, w: 2.0210, b: 0.3251
Epoch 16/100, Loss: 0.9017, w: 2.0208, b: 0.3262
Epoch 17/100, Loss: 0.9016, w: 2.0207, b: 0.3272
Epoch 18/100, Loss: 0.9015, w: 2.0205, b: 0.3283
Epoch 19/100, Loss: 0.9014, w: 2.0203, b: 0.3294
Epoch 20/100, Loss: 0.9012, w: 2.0202, b: 0.3304
Epoch 21/100, Loss: 0.9011, w: 2.0200, b: 0.3315
Epoch 22/100, Loss: 0.9010, w: 2.0199, b: 0.3325
Epoch 23/100, Loss: 0.9009, w: 2.0197, b: 0.3336
Epoch 24/100, Loss: 0.9008, w: 2.0195, b: 0.3346
Epoch 25/100, Loss: 0.9007, w: 2.0194, b: 0.3356
Epoch 26/100, Loss: 0.9006, w: 2.0192, b: 0.3366
Epoch 27/100, Loss: 0.9005, w: 2.0191, b: 0.3377
Epoch 28/100, Loss: 0.9004, w: 2.0189, b: 0.3387
Epoch 29/100, Loss: 0.9003, w: 2.0188, b: 0.3397
Epoch 30/100, Loss: 0.9002, w: 2.0186, b: 0.3407
Epoch 31/100, Loss: 0.9001, w: 2.0185, b: 0.3417
Epoch 32/100, Loss: 0.9000, w: 2.0183, b: 0.3427
Epoch 33/100, Loss: 0.8998, w: 2.0182, b: 0.3437
Epoch 34/100, Loss: 0.8997, w: 2.0180, b: 0.3446
Epoch 35/100, Loss: 0.8997, w: 2.0179, b: 0.3456
Epoch 36/100, Loss: 0.8996, w: 2.0177, b: 0.3466
Epoch 37/100, Loss: 0.8995, w: 2.0176, b: 0.3476
Epoch 38/100, Loss: 0.8994, w: 2.0174, b: 0.3485
Epoch 39/100, Loss: 0.8993, w: 2.0173, b: 0.3495
Epoch 40/100, Loss: 0.8992, w: 2.0171, b: 0.3505
Epoch 41/100, Loss: 0.8991, w: 2.0170, b: 0.3514
Epoch 42/100, Loss: 0.8990, w: 2.0168, b: 0.3523
Epoch 43/100, Loss: 0.8989, w: 2.0167, b: 0.3533
Epoch 44/100, Loss: 0.8988, w: 2.0165, b: 0.3542
Epoch 45/100, Loss: 0.8987, w: 2.0164, b: 0.3552
Epoch 46/100, Loss: 0.8986, w: 2.0163, b: 0.3561
Epoch 47/100, Loss: 0.8985, w: 2.0161, b: 0.3570
Epoch 48/100, Loss: 0.8985, w: 2.0160, b: 0.3579
Epoch 49/100, Loss: 0.8984, w: 2.0158, b: 0.3588
Epoch 50/100, Loss: 0.8983, w: 2.0157, b: 0.3597
Epoch 51/100, Loss: 0.8982, w: 2.0156, b: 0.3606
Epoch 52/100, Loss: 0.8981, w: 2.0154, b: 0.3615
Epoch 53/100, Loss: 0.8980, w: 2.0153, b: 0.3624
Epoch 54/100, Loss: 0.8980, w: 2.0152, b: 0.3633
Epoch 55/100, Loss: 0.8979, w: 2.0150, b: 0.3642
Epoch 56/100, Loss: 0.8978, w: 2.0149, b: 0.3651
Epoch 57/100, Loss: 0.8977, w: 2.0148, b: 0.3660
Epoch 58/100, Loss: 0.8976, w: 2.0146, b: 0.3668
Epoch 59/100, Loss: 0.8976, w: 2.0145, b: 0.3677
Epoch 60/100, Loss: 0.8975, w: 2.0144, b: 0.3686
Epoch 61/100, Loss: 0.8974, w: 2.0142, b: 0.3694
Epoch 62/100, Loss: 0.8973, w: 2.0141, b: 0.3703
Epoch 63/100, Loss: 0.8973, w: 2.0140, b: 0.3711
Epoch 64/100, Loss: 0.8972, w: 2.0138, b: 0.3720
Epoch 65/100, Loss: 0.8971, w: 2.0137, b: 0.3728
Epoch 66/100, Loss: 0.8970, w: 2.0136, b: 0.3737
Epoch 67/100, Loss: 0.8970, w: 2.0135, b: 0.3745
Epoch 68/100, Loss: 0.8969, w: 2.0133, b: 0.3753
Epoch 69/100, Loss: 0.8968, w: 2.0132, b: 0.3762
Epoch 70/100, Loss: 0.8967, w: 2.0131, b: 0.3770
Epoch 71/100, Loss: 0.8967, w: 2.0130, b: 0.3778
Epoch 72/100, Loss: 0.8966, w: 2.0128, b: 0.3786
Epoch 73/100, Loss: 0.8965, w: 2.0127, b: 0.3794
Epoch 74/100, Loss: 0.8965, w: 2.0126, b: 0.3802
Epoch 75/100, Loss: 0.8964, w: 2.0125, b: 0.3810
Epoch 76/100, Loss: 0.8963, w: 2.0123, b: 0.3818
Epoch 77/100, Loss: 0.8963, w: 2.0122, b: 0.3826
Epoch 78/100, Loss: 0.8962, w: 2.0121, b: 0.3834
Epoch 79/100, Loss: 0.8962, w: 2.0120, b: 0.3842
Epoch 80/100, Loss: 0.8961, w: 2.0119, b: 0.3850
Epoch 81/100, Loss: 0.8960, w: 2.0117, b: 0.3857
Epoch 82/100, Loss: 0.8960, w: 2.0116, b: 0.3865
Epoch 83/100, Loss: 0.8959, w: 2.0115, b: 0.3873
Epoch 84/100, Loss: 0.8958, w: 2.0114, b: 0.3881
Epoch 85/100, Loss: 0.8958, w: 2.0113, b: 0.3888
Epoch 86/100, Loss: 0.8957, w: 2.0112, b: 0.3896
Epoch 87/100, Loss: 0.8957, w: 2.0110, b: 0.3903
Epoch 88/100, Loss: 0.8956, w: 2.0109, b: 0.3911
Epoch 89/100, Loss: 0.8956, w: 2.0108, b: 0.3918
Epoch 90/100, Loss: 0.8955, w: 2.0107, b: 0.3926
Epoch 91/100, Loss: 0.8954, w: 2.0106, b: 0.3933
Epoch 92/100, Loss: 0.8954, w: 2.0105, b: 0.3940
Epoch 93/100, Loss: 0.8953, w: 2.0104, b: 0.3948
Epoch 94/100, Loss: 0.8953, w: 2.0103, b: 0.3955
Epoch 95/100, Loss: 0.8952, w: 2.0101, b: 0.3962
Epoch 96/100, Loss: 0.8952, w: 2.0100, b: 0.3969
Epoch 97/100, Loss: 0.8951, w: 2.0099, b: 0.3977
Epoch 98/100, Loss: 0.8951, w: 2.0098, b: 0.3984
Epoch 99/100, Loss: 0.8950, w: 2.0097, b: 0.3991
Epoch 100/100, Loss: 0.8950, w: 2.0096, b: 0.3998

Final values: w = 2.0096, b = 0.3998
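For the "with library" half of the aim (not shown in this excerpt), a minimal sketch using scikit-learn's LinearRegression on the same split could look like the following; sklearn expects a 2-D feature matrix, hence the reshape. Variable names follow the cells above.

from sklearn.linear_model import LinearRegression

# Closed-form least-squares fit on the training split
model = LinearRegression()
model.fit(x_train.reshape(-1, 1), y_train)
print("sklearn: w =", model.coef_[0], ", b =", model.intercept_)
print("Test R^2:", model.score(x_test.reshape(-1, 1), y_test))

The coefficients from this fit should closely match the gradient-descent values above (w ≈ 2.01, b ≈ 0.40), since both approaches minimise the same squared-error objective.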
