# Lab Exam ... Roll No 24cs4103
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
# --- Visualize the Iris data and a fitted model's decision boundary ---
# NOTE(review): this fragment relies on names defined earlier in the full
# script (X, y, iris, model, xx, yy, X_test) that are not visible here —
# confirm the data-loading / train-test split / meshgrid code precedes it.

# Left panel: the raw data points, colored by class label.
plt.subplot(1, 2, 1)
plt.scatter(X[:, 0], X[:, 1], c=y, edgecolors='k', cmap=plt.cm.Set1)
plt.title('Iris Dataset: Original Data Points')
plt.xlabel(iris.feature_names[0])  # presumably 'sepal length (cm)' — verify
plt.ylabel(iris.feature_names[1])  # presumably 'sepal width (cm)' — verify
plt.grid()
# Predict a class for every point of the (xx, yy) meshgrid, then reshape the
# flat prediction vector back into the grid shape for contour plotting.
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# Right panel: filled contours of the predicted class regions, with the full
# dataset overlaid as circles and the held-out test points as black crosses.
plt.subplot(1, 2, 2)
plt.contourf(xx, yy, Z, alpha=0.8, cmap=plt.cm.Set1)
plt.scatter(X[:, 0], X[:, 1], c=y, marker='o', label='Data Points')
plt.scatter(X_test[:, 0], X_test[:, 1], c='black', marker='x', label='Test Data')
plt.title('Logistic Regression Decision Boundary')
plt.xlabel(iris.feature_names[0])
plt.ylabel(iris.feature_names[1])
plt.legend()
plt.grid()
plt.tight_layout()
plt.show()
# 2. Logistic Regression Without Using Inbuilt Functions
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
class LogisticRegressionScratch:
    """Multinomial (softmax) logistic regression trained from scratch.

    Uses full-batch gradient descent on the cross-entropy loss. The original
    paste lost its indentation and the `fit` method's header, parameter
    initialization, and weight-update steps; this reconstruction keeps the
    surviving gradient computations (dw, db) verbatim and restores the
    missing pieces around them.
    """

    def __init__(self, learning_rate=0.01, num_iterations=1000):
        # Step size and number of full-batch gradient-descent iterations.
        self.learning_rate = learning_rate
        self.num_iterations = num_iterations
        # Learned parameters; shapes (n_features, n_classes) and (n_classes,)
        # respectively, populated by fit().
        self.weights = None
        self.bias = None

    def softmax(self, z):
        """Row-wise softmax of a (n_samples, n_classes) score matrix."""
        # Subtract the per-row max before exponentiating for numerical
        # stability (avoids overflow; the result is mathematically identical).
        shifted = z - np.max(z, axis=1, keepdims=True)
        exp_z = np.exp(shifted)
        return exp_z / np.sum(exp_z, axis=1, keepdims=True)

    def _one_hot(self, y, num_classes):
        """Encode integer labels y as a (n_samples, num_classes) 0/1 matrix."""
        one_hot = np.zeros((y.shape[0], num_classes))
        one_hot[np.arange(y.shape[0]), y] = 1.0
        return one_hot

    def fit(self, X, y):
        """Fit the model on features X (n_samples, n_features), labels y.

        Labels must be integers 0..(n_classes-1). Returns self.
        """
        num_samples, num_features = X.shape
        num_classes = int(np.max(y)) + 1
        self.weights = np.zeros((num_features, num_classes))
        self.bias = np.zeros(num_classes)
        y_one_hot = self._one_hot(y, num_classes)
        for _ in range(self.num_iterations):
            # Linear model
            linear_model = np.dot(X, self.weights) + self.bias
            probabilities = self.softmax(linear_model)
            # Compute gradients of cross-entropy w.r.t. weights and bias.
            dw = (1 / num_samples) * np.dot(X.T, (probabilities - y_one_hot))
            db = (1 / num_samples) * np.sum(probabilities - y_one_hot, axis=0)
            # Gradient-descent parameter update.
            self.weights -= self.learning_rate * dw
            self.bias -= self.learning_rate * db
        return self

    def predict(self, X):
        """Return the most probable class index for each row of X."""
        linear_model = np.dot(X, self.weights) + self.bias
        return np.argmax(self.softmax(linear_model), axis=1)
# --- Visualize the Iris data and the scratch model's decision boundary ---
# NOTE(review): duplicate of the earlier plotting fragment; it relies on
# names (X, y, iris, model, xx, yy, X_test) that are not defined in this
# excerpt — confirm the loading / training / meshgrid code precedes it.

# Left panel: the raw data points, colored by class label.
plt.subplot(1, 2, 1)
plt.scatter(X[:, 0], X[:, 1], c=y, edgecolors='k', cmap=plt.cm.Set1)
plt.title('Iris Dataset: Original Data Points')
plt.xlabel(iris.feature_names[0])  # presumably 'sepal length (cm)' — verify
plt.ylabel(iris.feature_names[1])  # presumably 'sepal width (cm)' — verify
plt.grid()
# Classify every meshgrid point, then restore the grid shape for contourf.
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# Right panel: predicted class regions with data and test points overlaid.
plt.subplot(1, 2, 2)
plt.contourf(xx, yy, Z, alpha=0.8, cmap=plt.cm.Set1)
plt.scatter(X[:, 0], X[:, 1], c=y, marker='o', label='Data Points')
plt.scatter(X_test[:, 0], X_test[:, 1], c='black', marker='x', label='Test Data')
plt.title('Logistic Regression Decision Boundary')
plt.xlabel(iris.feature_names[0])
plt.ylabel(iris.feature_names[1])
plt.legend()
plt.grid()
plt.tight_layout()
plt.show()