Btech1007022 Lab5
Roll - BTECH/10066/22
LAB-5
Program1:
# Program 1: Simple linear regression (experience vs. salary) fitted from
# scratch with batch gradient descent on y = m*x + b.
#
# NOTE(review): this block was reconstructed from a whitespace-mangled paste.
# Fixes applied: missing `matplotlib.pyplot` import, missing `with open(...)`
# around `csv.reader(file)`, `compute_mse` had lost its `def` line and loop
# body (the `return` was orphaned), and the inner loop that accumulates
# m_grad/b_grad had been lost entirely.
import csv
import numpy as np  # kept from original paste (unused in this program)


def load_data(path):
    """Read (experience, salary) float pairs from a two-column CSV file.

    NOTE(review): the original paste did not show how `file` was opened or
    the CSV filename — confirm the path against the original lab handout.
    """
    data = []
    with open(path, newline="") as file:
        reader = csv.reader(file)
        for row in reader:
            data.append([float(row[0]), float(row[1])])
    return data


def compute_mse(X, Y, m, b):
    """Mean squared error of the line y = m*x + b over the dataset."""
    n = len(X)
    total_error = 0.0
    for i in range(n):
        total_error += (Y[i] - (m * X[i] + b)) ** 2
    return total_error / n


def gradient_descent(X, Y, learning_rate=0.01, iterations=1000):
    """Fit slope m and intercept b by batch gradient descent.

    Returns (m, b, errors) where errors[k] is the MSE recorded after
    iteration k, so the training curve can be plotted.
    """
    m = 0.0  # Slope
    b = 0.0  # Intercept
    n = len(X)
    errors = []
    for _ in range(iterations):
        m_grad = 0.0
        b_grad = 0.0
        for i in range(n):
            # Gradient of squared error: d/dm = 2*e*x, d/db = 2*e,
            # with e = prediction - target.
            error = (m * X[i] + b) - Y[i]
            m_grad += 2 * error * X[i]
            b_grad += 2 * error
        m -= (m_grad / n) * learning_rate
        b -= (b_grad / n) * learning_rate
        mse = compute_mse(X, Y, m, b)
        errors.append(mse)
    return m, b, errors


if __name__ == "__main__":
    # Plotting and file I/O only run when executed as a script.
    import matplotlib.pyplot as plt  # local import: needed only for plots

    data = load_data("salary_data.csv")  # TODO confirm filename
    X = [row[0] for row in data]
    Y = [row[1] for row in data]

    # Raw data scatter plot.
    plt.scatter(X, Y, color='blue')
    plt.xlabel('Experience (years)')
    plt.ylabel('Salary')
    plt.show()

    m, b, errors = gradient_descent(X, Y)

    # Training curve: MSE per iteration.
    plt.plot(errors)
    plt.xlabel('Iteration')
    plt.ylabel('MSE')
    plt.show()

    # Data with the fitted regression line.
    plt.scatter(X, Y, color='blue')
    plt.plot(X, [m * x + b for x in X], color='red')  # Best fit line
    plt.xlabel('Experience (years)')
    plt.ylabel('Salary')
    plt.show()
OUTPUT:
Program2:
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
OUTPUT:
Program3:
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import accuracy_score
# Split the data into training and testing sets (80% training, 20% testing)
# NOTE(review): this pasted fragment does not show where X and y are built —
# presumably the feature matrix and label vector of the loaded dataset;
# confirm against the original program. random_state=42 fixes the shuffle
# seed so the split is reproducible.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
random_state=42)
# NOTE(review): interior of a from-scratch logistic-regression training loop
# (an instance method — it reads self.weights/self.bias/self.learning_rate).
# The enclosing class and `def` header, the loop-body indentation, and the
# lines that compute the gradients `dw`/`db` were lost in this paste; they
# must be restored before this code can run.
for _ in range(self.iterations):
linear_model = np.dot(X, self.weights) + self.bias  # raw scores: X.w + b
y_pred = self.sigmoid(linear_model)  # map scores to probabilities
self.weights -= self.learning_rate * dw  # dw is undefined here (lost line)
self.bias -= self.learning_rate * db  # db is undefined here (lost line)
OUTPUT: