AI Lab M.Tech
TECHNOLOGY BHALKI
ALGORITHMS
&
AI LABORATORY
MCSL106
Lab Manual
1. Implement a simple linear regression algorithm to predict a continuous target variable
based on a given dataset.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split

# Generate synthetic data: y = 4 + 3x + Gaussian noise
np.random.seed(42)
X = 2 * np.random.rand(100, 1)
y = 4 + 3 * X + np.random.randn(100, 1)

# Split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Add a bias (intercept) column and solve the normal equation
X_b = np.c_[np.ones((X_train.shape[0], 1)), X_train]
theta_best = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y_train)

# Make predictions
X_test_b = np.c_[np.ones((X_test.shape[0], 1)), X_test]
y_pred = X_test_b.dot(theta_best)

# Plot results
plt.scatter(X_test, y_test, color='blue', label='Actual')
plt.plot(X_test, y_pred, color='red', linewidth=2, label='Predicted')
plt.xlabel("X")
plt.ylabel("y")
plt.legend()
plt.show()
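Because the data above is generated as y = 4 + 3x plus noise, the fitted parameters can be sanity-checked directly; a short optional check (theta_best holds the intercept and slope recovered by the normal equation):

# theta_best = [intercept, slope]; these should come out close to 4 and 3
print(f"Estimated intercept: {theta_best[0][0]:.3f}")
print(f"Estimated slope: {theta_best[1][0]:.3f}")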
Output:
2. Develop a program to implement a Support Vector Machine for binary classification.
Use a sample dataset and visualize the decision boundary.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score

# Generate a two-class sample dataset and split it
X, y = make_blobs(n_samples=200, centers=2, random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Train a linear SVM classifier
svm_classifier = SVC(kernel='linear')
svm_classifier.fit(X_train, y_train)

# Make predictions
y_pred = svm_classifier.predict(X_test)

# Calculate accuracy
accuracy = accuracy_score(y_test, y_pred)
print(f"Accuracy: {accuracy:.4f}")

# Plot the decision boundary over a grid of points
def plot_decision_boundary(X, y, model):
    xx, yy = np.meshgrid(np.linspace(X[:, 0].min() - 1, X[:, 0].max() + 1, 200),
                         np.linspace(X[:, 1].min() - 1, X[:, 1].max() + 1, 200))
    Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    plt.contourf(xx, yy, Z, alpha=0.3)
    plt.scatter(X[:, 0], X[:, 1], c=y, edgecolors='k')
    plt.xlabel("Feature 1")
    plt.ylabel("Feature 2")
    plt.show()

plot_decision_boundary(X, y, svm_classifier)
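A fitted SVC also exposes the training points that act as support vectors through its support_vectors_ attribute; an optional extra plot that highlights them (this step is not part of the original listing):

# Highlight the support vectors that define the margin
sv = svm_classifier.support_vectors_
plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, alpha=0.5)
plt.scatter(sv[:, 0], sv[:, 1], s=120, facecolors='none', edgecolors='red', label='Support vectors')
plt.legend()
plt.show()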
3. Develop a simple case-based reasoning system that stores instances of past cases.
Implement a retrieval method to find the most similar cases and make
predictions based on them.
import numpy as np

class CaseBasedReasoning:
    def __init__(self):
        self.case_base = []   # stored past cases as (features, label) pairs

    def add_case(self, features, label):
        self.case_base.append((np.array(features), label))

    def retrieve_similar(self, query_features, k=3):
        # Rank stored cases by Euclidean distance to the query
        scored = [(np.linalg.norm(features - query_features), label)
                  for features, label in self.case_base]
        return sorted(scored, key=lambda pair: pair[0])[:k]

    def predict(self, query_features, k=3):
        query_features = np.array(query_features)
        similar_cases = self.retrieve_similar(query_features, k)
        labels = [label for _, label in similar_cases]
        return max(set(labels), key=labels.count)  # Majority vote

# Example usage (the stored cases below are illustrative; the original values were not preserved)
cbr = CaseBasedReasoning()
for features, label in [([1, 1], 'A'), ([2, 2], 'A'), ([1, 2], 'A'), ([6, 6], 'B'), ([7, 7], 'B')]:
    cbr.add_case(features, label)

# Query case
query = [3, 3]
print("Predicted label:", cbr.predict(query))
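To see why the system gives that answer, the retrieved neighbours can be printed alongside the prediction; a small optional addition that reuses retrieve_similar from the class above:

# Inspect the k most similar stored cases as (distance, label) pairs
for distance, label in cbr.retrieve_similar(np.array(query), k=3):
    print(f"distance = {distance:.2f}, label = {label}")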
Output:
4. Write a program to demonstrate the ID3 decision tree algorithm using an
appropriate dataset for classification.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier, plot_tree
from sklearn.metrics import accuracy_score

# Classic PlayTennis dataset
data = {
    'Outlook': ['Sunny', 'Sunny', 'Overcast', 'Rain', 'Rain', 'Rain', 'Overcast', 'Sunny', 'Sunny', 'Rain',
                'Sunny', 'Overcast', 'Overcast', 'Rain'],
    'Temperature': ['Hot', 'Hot', 'Hot', 'Mild', 'Cool', 'Cool', 'Cool', 'Mild', 'Cool', 'Mild', 'Mild',
                    'Mild', 'Hot', 'Mild'],
    'Humidity': ['High', 'High', 'High', 'High', 'Normal', 'Normal', 'Normal', 'High', 'Normal',
                 'Normal', 'Normal', 'High', 'Normal', 'High'],
    'Wind': ['Weak', 'Strong', 'Weak', 'Weak', 'Weak', 'Strong', 'Strong', 'Weak', 'Weak', 'Weak',
             'Strong', 'Strong', 'Weak', 'Strong'],
    'PlayTennis': ['No', 'No', 'Yes', 'Yes', 'Yes', 'No', 'Yes', 'No', 'Yes', 'Yes', 'Yes', 'Yes', 'Yes', 'No']
}
df = pd.DataFrame(data)

# One-hot encode the categorical attributes
df = pd.get_dummies(df, columns=['Outlook', 'Temperature', 'Humidity', 'Wind'], drop_first=True)
X = df.drop(columns=['PlayTennis'])
y = df['PlayTennis']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# ID3 splits on information gain, which corresponds to the 'entropy' criterion
dt_classifier = DecisionTreeClassifier(criterion='entropy', random_state=42)
dt_classifier.fit(X_train, y_train)

# Make predictions
y_pred = dt_classifier.predict(X_test)

# Calculate accuracy
accuracy = accuracy_score(y_test, y_pred)
print(f"Accuracy: {accuracy:.4f}")

# Visualize the learned tree
plt.figure(figsize=(12, 8))
plot_tree(dt_classifier, feature_names=list(X.columns), class_names=list(dt_classifier.classes_), filled=True)
plt.show()
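The scikit-learn tree above uses criterion='entropy', which is the information-gain rule at the heart of ID3. To make that measure visible, the sketch below recomputes the entropy of the class column and the information gain of each attribute on the raw (un-encoded) data; the helper names entropy and information_gain are illustrative, not part of the original listing.

# Rebuild the raw (un-encoded) dataframe to inspect ID3's splitting measure
df_raw = pd.DataFrame(data)

def entropy(series):
    # H(S) = -sum(p * log2(p)) over the class proportions
    probs = series.value_counts(normalize=True)
    return -np.sum(probs * np.log2(probs))

def information_gain(frame, attribute, target='PlayTennis'):
    # Gain(S, A) = H(S) - sum(|S_v| / |S| * H(S_v)) over the values v of attribute A
    total = entropy(frame[target])
    weighted = sum(len(sub) / len(frame) * entropy(sub[target])
                   for _, sub in frame.groupby(attribute))
    return total - weighted

for col in ['Outlook', 'Temperature', 'Humidity', 'Wind']:
    print(f"Information gain of {col}: {information_gain(df_raw, col):.4f}")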
Output:
5. Build an Artificial Neural Network by implementing the Backpropagation
algorithm and test it with suitable datasets.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_moons
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

# Generate dataset (make_moons is used here as a simple two-class test set)
X, y = make_moons(n_samples=500, noise=0.2, random_state=42)
y = y.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Normalize data
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def sigmoid_derivative(x):
    return x * (1 - x)   # x is assumed to already be a sigmoid output

# Network architecture and hyperparameters
input_size = 2
hidden_size = 4
output_size = 1
learning_rate = 0.1

# Initialize weights
np.random.seed(42)
theta1 = np.random.randn(input_size, hidden_size)
theta2 = np.random.randn(hidden_size, output_size)

epochs = 10000
for epoch in range(epochs):
    # Forward propagation
    hidden_input = X_train.dot(theta1)
    hidden_output = sigmoid(hidden_input)
    final_input = hidden_output.dot(theta2)
    final_output = sigmoid(final_input)
    # Compute error
    error = y_train - final_output
    # Backpropagation
    d_output = error * sigmoid_derivative(final_output)
    d_hidden = d_output.dot(theta2.T) * sigmoid_derivative(hidden_output)
    theta2 += learning_rate * hidden_output.T.dot(d_output)
    theta1 += learning_rate * X_train.T.dot(d_hidden)
    if epoch % 1000 == 0:
        loss = np.mean(np.abs(error))
        print(f"Epoch {epoch}, Loss: {loss:.4f}")

# Plot the learned decision boundary on the test set
xx, yy = np.meshgrid(np.linspace(X_test[:, 0].min() - 1, X_test[:, 0].max() + 1, 200),
                     np.linspace(X_test[:, 1].min() - 1, X_test[:, 1].max() + 1, 200))
Z = sigmoid(sigmoid(np.c_[xx.ravel(), yy.ravel()].dot(theta1)).dot(theta2))
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z > 0.5, alpha=0.3)
plt.scatter(X_test[:, 0], X_test[:, 1], c=y_test.ravel(), edgecolors='k')
plt.show()
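To test the trained network as the exercise asks, the held-out set can be pushed through the same forward pass and thresholded at 0.5; a short addition using the weights learned above:

# Evaluate on the test set: forward pass, threshold at 0.5, compare with the labels
test_output = sigmoid(sigmoid(X_test.dot(theta1)).dot(theta2))
test_pred = (test_output > 0.5).astype(int)
print(f"Test accuracy: {np.mean(test_pred == y_test):.4f}")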
Output:
6. Implement a KNN algorithm for regression tasks instead of classification. Use a
small dataset, and predict continuous values based on the average of the nearest
neighbors.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error

# Generate a small one-dimensional dataset (a noisy sine curve is used as an example)
np.random.seed(42)
X = np.sort(5 * np.random.rand(100, 1), axis=0)
y = np.sin(X).ravel() + 0.1 * np.random.randn(100)

# Split dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Standardize features
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

class KNNRegressor:
    def __init__(self, k=5):
        self.k = k
        self.X_train = None
        self.y_train = None

    def fit(self, X, y):
        self.X_train = X
        self.y_train = y

    def predict(self, X):
        y_pred = []
        for x in X:
            # Distance from the query point to every training point
            distances = np.sqrt(np.sum((self.X_train - x) ** 2, axis=1))
            k_neighbors = np.argsort(distances)[:self.k]
            # Prediction = average of the k nearest targets
            y_pred.append(np.mean(self.y_train[k_neighbors]))
        return np.array(y_pred)

knn_regressor = KNNRegressor(k=5)
knn_regressor.fit(X_train, y_train)
y_pred = knn_regressor.predict(X_test)

# Evaluate model
mse = mean_squared_error(y_test, y_pred)
print(f"Mean Squared Error: {mse:.4f}")

# Plot results
plt.scatter(X_test, y_test, color='blue', label='Actual values')
plt.scatter(X_test, y_pred, color='red', label='Predicted values')
plt.xlabel("X")
plt.ylabel("y")
plt.legend()
plt.show()
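The quality of the fit depends strongly on k; an optional experiment that reuses the class above and reports the test error for a few neighbourhood sizes (the particular k values are arbitrary):

# Compare the test error for several neighbourhood sizes
for k in [1, 3, 5, 10]:
    model = KNNRegressor(k=k)
    model.fit(X_train, y_train)
    mse_k = mean_squared_error(y_test, model.predict(X_test))
    print(f"k = {k:2d} -> MSE = {mse_k:.4f}")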
Output:
7. Create a program that calculates different distance metrics (Euclidean and
Manhattan) between two points in a dataset. Allow the user to input two points
and display the calculated distances.
import numpy as np
# Calculate distances
# Display results
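Only the outline of this program survives above, so the sketch below fills in the missing steps; it assumes the two points are typed in as comma-separated coordinates, which is one reasonable reading of "allow the user to input two points".

import numpy as np

# Read the two points from the user
point1 = np.array([float(v) for v in input("Enter point 1 (comma-separated): ").split(",")])
point2 = np.array([float(v) for v in input("Enter point 2 (comma-separated): ").split(",")])

# Calculate distances
euclidean = np.sqrt(np.sum((point1 - point2) ** 2))   # straight-line distance
manhattan = np.sum(np.abs(point1 - point2))           # sum of absolute coordinate differences

# Display results
print(f"Euclidean distance: {euclidean:.4f}")
print(f"Manhattan distance: {manhattan:.4f}")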
Output:
8. Implement the k-Nearest Neighbor algorithm to classify the Iris dataset, printing
both correct and incorrect predictions.
import numpy as np
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

# Load the Iris dataset
data = load_iris()
X = data.data
y = data.target

# Split dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Standardize features
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

class KNNClassifier:
    def __init__(self, k=5):
        self.k = k

    def fit(self, X, y):
        self.X_train = X
        self.y_train = y

    def predict(self, X):
        predictions = []
        for x in X:
            # Distance from the query point to every training point
            distances = np.sqrt(np.sum((self.X_train - x) ** 2, axis=1))
            k_neighbors = np.argsort(distances)[:self.k]
            # Majority vote among the k nearest labels
            labels, counts = np.unique(self.y_train[k_neighbors], return_counts=True)
            predictions.append(labels[np.argmax(counts)])
        return np.array(predictions)

knn = KNNClassifier(k=5)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)

# Report correct and incorrect predictions
correct = (y_pred == y_test)
print("Correct Predictions:")
for i, (actual, pred) in enumerate(zip(y_test, y_pred)):
    if correct[i]:
        print(f"Index {i}: Actual = {actual}, Predicted = {pred}")
print("\nIncorrect Predictions:")
for i, (actual, pred) in enumerate(zip(y_test, y_pred)):
    if not correct[i]:
        print(f"Index {i}: Actual = {actual}, Predicted = {pred}")

accuracy = np.mean(correct)
print(f"\nAccuracy: {accuracy:.4f}")
Output:
Correct Predictions:
Index 12: Actual = 0, Predicted = 0
Incorrect Predictions:
Accuracy: 1.0000
9. Develop a program to implement the non-parametric Locally Weighted
Regression algorithm, fitting data points and visualizing results.
import numpy as np
import matplotlib.pyplot as plt

# Generate noisy training data (a sine curve is used here; the original dataset was not preserved)
np.random.seed(42)
X_train = np.sort(5 * np.random.rand(100))
y_train = np.sin(X_train) + 0.1 * np.random.randn(100)

def locally_weighted_regression(query_x, X_train, y_train, tau=0.5):
    m = X_train.shape[0]
    # Compute weights: points near the query get larger weight (Gaussian kernel with bandwidth tau)
    weights = np.exp(-((X_train - query_x) ** 2) / (2 * tau ** 2))
    W = np.diag(weights)
    # Design matrix
    X_b = np.c_[np.ones((m, 1)), X_train]  # Add bias term
    query_x_b = np.array([1.0, query_x])
    # Solve the weighted normal equation and predict at the query point
    theta = np.linalg.pinv(X_b.T.dot(W).dot(X_b)).dot(X_b.T).dot(W).dot(y_train)
    return query_x_b.dot(theta)
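The listing stops before the prediction sweep and the plot the exercise asks for; the sketch below completes those steps with the function above (the 200-point query grid and the bandwidth tau = 0.5 are illustrative choices, not values from the original):

# Predict over a grid of query points and visualize the locally weighted fit
x_query = np.linspace(X_train.min(), X_train.max(), 200)
y_query = np.array([locally_weighted_regression(q, X_train, y_train, tau=0.5) for q in x_query])

plt.scatter(X_train, y_train, color='blue', alpha=0.5, label='Training data')
plt.plot(x_query, y_query, color='red', linewidth=2, label='LWR fit')
plt.xlabel("X")
plt.ylabel("y")
plt.legend()
plt.show()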
10. Implement a Q-learning algorithm to navigate a simple grid environment,
defining the reward structure and analyzing agent performance.
import numpy as np
import random

# Environment set-up
GRID_SIZE = 5
ACTIONS = ['up', 'down', 'left', 'right']
EPISODES = 500
ALPHA = 0.1     # learning rate
GAMMA = 0.9     # discount factor
EPSILON = 0.1   # exploration rate
GOAL = (GRID_SIZE - 1, GRID_SIZE - 1)

# Initialize Q-table: one value per (row, column, action)
Q_table = np.zeros((GRID_SIZE, GRID_SIZE, len(ACTIONS)))

# Reward structure: -1 per step, +100 at the goal cell (illustrative values)
REWARDS = np.full((GRID_SIZE, GRID_SIZE), -1.0)
REWARDS[GOAL] = 100.0

def get_next_state(state, action):
    x, y = state
    if action == 'up':
        x = max(x - 1, 0)
    elif action == 'down':
        x = min(x + 1, GRID_SIZE - 1)
    elif action == 'left':
        y = max(y - 1, 0)
    elif action == 'right':
        y = min(y + 1, GRID_SIZE - 1)
    return x, y

# Q-learning algorithm
def q_learning():
    for episode in range(EPISODES):
        state = (0, 0)
        while state != GOAL:
            # Epsilon-greedy action selection
            if random.random() < EPSILON:
                action_idx = random.randrange(len(ACTIONS))
            else:
                action_idx = np.argmax(Q_table[state[0], state[1]])
            next_state = get_next_state(state, ACTIONS[action_idx])
            reward = REWARDS[next_state]
            best_next_action = np.max(Q_table[next_state[0], next_state[1]])
            # Update Q-value
            Q_table[state[0], state[1], action_idx] += ALPHA * (reward + GAMMA *
                best_next_action - Q_table[state[0], state[1], action_idx])
            state = next_state
        if episode % 100 == 0:
            print(f"Episode {episode} completed.")

q_learning()
print("Learned Q-values:")
print(Q_table)

def visualize_policy():
    policy = np.empty((GRID_SIZE, GRID_SIZE), dtype=object)
    for i in range(GRID_SIZE):
        for j in range(GRID_SIZE):
            if (i, j) == GOAL:
                policy[i, j] = 'G'
            else:
                action_idx = np.argmax(Q_table[i, j])
                policy[i, j] = ACTIONS[action_idx][0].upper()
    print("\nOptimal Policy:")
    print(policy)

visualize_policy()
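One simple way to analyze the agent's performance, as the exercise asks, is to follow the learned greedy policy from the start cell and count the steps needed to reach the goal; the helper below is an addition, not part of the original listing:

def evaluate_policy(max_steps=100):
    # Follow the greedy (argmax) policy from the start cell and count the steps to the goal
    state, steps = (0, 0), 0
    while state != GOAL and steps < max_steps:
        action_idx = np.argmax(Q_table[state[0], state[1]])
        state = get_next_state(state, ACTIONS[action_idx])
        steps += 1
    return steps

print(f"\nGreedy-policy path length from (0, 0): {evaluate_policy()} steps")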
Output:
Episode 0 completed.
Learned Q-values:
[-7.42731579e-01 -7.78518187e-01 -8.30191893e-01 4.54976022e+01]
Optimal Policy: