# NOTE: exported notebook — "Code" cell markers and printed cell output were
# flattened into this file alongside the source below.
# Load the wine dataset (columns: alcohol, quality — see the printed output
# below) from a path relative to this script's location.
data = pd.read_csv('../../dataset/linear-regression.csv')
# Show the frame; pandas elides the middle rows of long frames with "...".
print(data)
# Printed output of `print(data)` (1599 rows):
#       alcohol  quality
# 0         9.4        5
# 1         9.8        5
# 2         9.8        5
# 3         9.8        6
# 4         9.4        5
# ...       ...      ...
# 1594     10.5        5
# 1595     11.2        6
# 1596     11.0        6
# 1597     10.2        5
# 1598     11.0        6
# Feature matrix: every column except the target — here just 'alcohol'.
X = data.drop(columns=['quality'])
# Target vector: the integer wine-quality scores.
y = data['quality']
# Compute the mean squared error loss between the predicted values and the
# actual target values.
def compute_loss(X, y, theta):
    """Return the mean squared error of the linear model X @ theta against y.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Design matrix (include a ones column if a bias term is wanted).
    y : array-like, shape (n_samples,)
        True target values.
    theta : array-like, shape (n_features,)
        Model parameters.

    Returns
    -------
    float
        Mean of the squared residuals.
    """
    residuals = X @ theta - y
    return np.mean(residuals ** 2)
# Run gradient-based optimization once per candidate learning rate.
# NOTE(review): `learning_rates`, `X_train`, `y_train`, `num_iterations` and
# `gradient_ascent` are defined elsewhere in this file — not visible here.
for lr in learning_rates:
    # Fresh parameter vector each run; the +1 slot is for the bias term.
    theta_initial = np.zeros(X_train.shape[1] + 1)
    # Prepend a column of ones so theta[0] acts as the intercept.
    X_train_with_bias = np.c_[np.ones((X_train.shape[0], 1)), X_train]
    theta_optimized, losses = gradient_ascent(
        X_train_with_bias, y_train, theta_initial, lr, num_iterations
    )
# Coefficient of determination R^2 = 1 - SSR/SST.
# NOTE(review): `ssr` (residual sum of squares) and `sst` (total sum of
# squares) are computed elsewhere in the file — not visible here; confirm
# they correspond to the same prediction set.
r2 = 1-(ssr / sst)
# Label and display the loss curves; plt.legend() assumes the curves were
# plotted with `label=` somewhere above (not visible in this chunk).
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.title('Loss Function vs. Iteration for Different Learning Rates')
plt.legend()
plt.show()