21bit0706 VL2024250106861 Da
Code:
!pip install -q kaggle
!mkdir -p ~/.kaggle
!cp /content/kaggle.json ~/.kaggle/
!chmod 600 ~/.kaggle/kaggle.json
!kaggle datasets download -d mssmartypants/paris-housing-price-prediction
!unzip /content/paris-housing-price-prediction.zip
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
# Load the Paris housing data and check for missing values
df = pd.read_csv('/content/ParisHousing.csv')
df.head()
df.isnull().sum()
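The imports above bring in train_test_split, LinearRegression, and the regression metrics, but the excerpt stops after the missing-value check. A minimal sketch of how the model could be fitted and evaluated, assuming the target column of ParisHousing.csv is named 'price':

# Separate features and target (assumes the target column is named 'price')
X = df.drop(columns=['price'])
y = df['price']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Fit an ordinary least-squares model and evaluate it on the held-out split
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print('MAE:', mean_absolute_error(y_test, y_pred))
print('MSE:', mean_squared_error(y_test, y_pred))
print('R2 :', r2_score(y_test, y_pred))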
Gradient Descent
Code:
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.metrics import classification_report, accuracy_score
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
# Load the Iris dataset
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data"
column_names = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'species']
data = pd.read_csv(url, delimiter=',', names=column_names)
print(data.head())
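LabelEncoder, StandardScaler, and train_test_split are imported above but not used in this excerpt. A minimal preprocessing sketch; the split ratio, stratification, and the variable names (X_train, y_train, ...) referenced by the later plots are assumptions:

# Encode the species strings as integer class labels
le = LabelEncoder()
X = data.drop(columns=['species']).values
y = le.fit_transform(data['species'])

# Hold out a test split and standardise the features
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42, stratify=y)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)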
class LogisticRegressionGD:
    def __init__(self, learning_rate=0.01, n_iterations=1000):
        self.learning_rate = learning_rate
        self.n_iterations = n_iterations
        self.cost_history = []

    def fit(self, X, y):
        # Start from zero weights and bias, then run gradient descent
        self.weights = np.zeros(X.shape[1])
        self.bias = 0.0
        for i in range(self.n_iterations):
            y_pred = self.predict(X)
            cost = self._compute_cost(y_pred, y)
            self.cost_history.append(cost)
            self._update_weights(X, y, y_pred)
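    # predict, _compute_cost, and _update_weights are called above but are not
    # shown in this excerpt. A minimal sketch for sigmoid-based logistic
    # regression with binary cross-entropy loss follows; the exact formulation
    # used in the original report is an assumption.
    def _sigmoid(self, z):
        return 1.0 / (1.0 + np.exp(-z))

    def predict(self, X):
        # Probability of the positive class
        return self._sigmoid(X.dot(self.weights) + self.bias)

    def _compute_cost(self, y_pred, y):
        # Binary cross-entropy, clipped to avoid log(0)
        eps = 1e-12
        y_pred = np.clip(y_pred, eps, 1 - eps)
        return -np.mean(y * np.log(y_pred) + (1 - y) * np.log(1 - y_pred))

    def _update_weights(self, X, y, y_pred):
        # Gradient step on the cross-entropy loss
        m = X.shape[0]
        error = y_pred - y
        self.weights -= self.learning_rate * X.T.dot(error) / m
        self.bias -= self.learning_rate * np.mean(error)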
"""**Performance metric bar charts and loss history plots for both Models**"""
# metrics, gd_values, and bpnn_values are computed earlier from the two models (not shown in this excerpt)
x = range(len(metrics))
plt.figure(figsize=(12, 6))
plt.bar(x, gd_values, width=0.4, label='Gradient Descent', align='center', color='b')
plt.bar([i + 0.4 for i in x], bpnn_values, width=0.4, label='BPNN', align='center', color='g')
plt.xlabel('Performance Metrics')
plt.ylabel('Values')
plt.title('Performance Metrics Comparison')
plt.xticks([i + 0.2 for i in x], metrics)
plt.legend()
plt.show()
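The loss plot below references model_gd (the trained LogisticRegressionGD instance) and a Keras history object, neither of which is trained in this excerpt. A minimal sketch of how the BPNN that produces history might be defined and trained; the layer sizes, epochs, and one-hot encoding step are assumptions:

# Back-propagation neural network (BPNN) on the standardised Iris features.
# Architecture and hyperparameters here are illustrative assumptions.
y_train_oh = tf.keras.utils.to_categorical(y_train, num_classes=3)

model_bpnn = Sequential([
    Dense(10, activation='relu', input_shape=(X_train.shape[1],)),
    Dense(10, activation='relu'),
    Dense(3, activation='softmax'),
])
model_bpnn.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# history.history['loss'] and history.history['val_loss'] feed the plot below
history = model_bpnn.fit(X_train, y_train_oh, validation_split=0.2, epochs=100, verbose=0)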
# Plot the cost history for Gradient Descent and the loss history for BPNN in the same chart
plt.figure(figsize=(12, 6))
plt.plot(model_gd.cost_history, label='Gradient Descent Training Cost', color='b')
plt.plot(history.history['loss'], label='BPNN Training Loss', color='g')
plt.plot(history.history['val_loss'], label='BPNN Validation Loss', color='r')
plt.xlabel('Iterations/Epochs')
plt.ylabel('Cost/Loss')
plt.title('Training Cost/Loss History')
plt.legend()
plt.show()
Linear Regression
Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error, r2_score
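The gradient-descent loop below uses X, y, theta, alpha, iterations, m, and cost_history, whose setup is not part of this excerpt. A minimal setup sketch; the dataset fetched from OpenML, the split, and the hyperparameter values are all assumptions:

# Fetch a regression dataset from OpenML (the dataset choice is an assumption)
housing = fetch_openml(name='boston', version=1, as_frame=False)
X_raw, y_all = housing.data, housing.target.astype(float)

# Hold out a test split and standardise the features
X_tr, X_te, y, y_test = train_test_split(X_raw, y_all, test_size=0.2, random_state=42)
scaler = StandardScaler()
X_tr = scaler.fit_transform(X_tr)
X_te = scaler.transform(X_te)

# Prepend a column of ones so the first element of theta acts as the intercept
X = np.c_[np.ones(X_tr.shape[0]), X_tr]
X_test = np.c_[np.ones(X_te.shape[0]), X_te]

m = X.shape[0]                # number of training samples
theta = np.zeros(X.shape[1])  # parameters, initialised to zero
alpha = 0.01                  # learning rate (assumed value)
iterations = 1000
cost_history = np.zeros(iterations)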
for i in range(iterations):
    gradients = X.T.dot(X.dot(theta) - y) / m
    theta -= alpha * gradients
    cost_history[i] = mean_squared_error(y, X.dot(theta))
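With the setup sketched above, the metrics imported earlier can be applied to the held-out split (X_test and y_test are names from that sketch):

# Evaluate the learned parameters on the test split
y_pred_test = X_test.dot(theta)
print('Test MSE:', mean_squared_error(y_test, y_pred_test))
print('Test R2 :', r2_score(y_test, y_pred_test))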