AI Journal
ROLL NO: 18
PRACTICAL 1
B) Implement the Iterative Deepening Depth First Search algorithm to solve the same problem
CODE: -
import queue as Q
from RMP import dict_gn

start = 'Arad'
goal = 'Bucharest'
result = ''

# Depth-Limited Search: recursive DFS that gives up at the given depth limit
def DLS(city, visitedstack, startlimit, endlimit):
    global result
    found = 0
    result = result + city + ' '
    visitedstack.append(city)
    if city == goal:
        return 1
    if startlimit == endlimit:
        return 0
    for eachcity in dict_gn[city].keys():
        if eachcity not in visitedstack:
            found = DLS(eachcity, visitedstack, startlimit + 1, endlimit)
            if found:
                return found
    return 0

# Iterative Deepening DFS: repeat DLS with successively larger depth limits
def IDDFS(city, visitedstack, endlimit):
    global result
    for i in range(0, endlimit):
        print("Searching at Limit: ", i)
        found = DLS(city, visitedstack, 0, i)
        if found:
            print("Found")
            break
        else:
            print("Not Found!")
            print(result)
            print("-----")
            result = ''
            visitedstack = []

def main():
    visitedstack = []
    IDDFS(start, visitedstack, 9)
    print("IDDFS Traversal from ", start, " to ", goal, " is: ")
    print(result)

main()
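Both this practical and Practical 2 import their road map from a helper module RMP, which is not reproduced in this journal. The sketch below shows what that module plausibly contains, based on the Romania map from Russell and Norvig (abridged to the cities west of Bucharest); the exact city spellings and values in the original file are assumptions.

# RMP.py -- assumed helper module (a sketch; the original file is not shown)
# dict_gn: road map, dict_gn[city][neighbour] = step cost g(n) in km
dict_gn = {
    'Arad':          {'Zerind': 75, 'Timisoara': 118, 'Sibiu': 140},
    'Zerind':        {'Arad': 75, 'Oradea': 71},
    'Oradea':        {'Zerind': 71, 'Sibiu': 151},
    'Timisoara':     {'Arad': 118, 'Lugoj': 111},
    'Lugoj':         {'Timisoara': 111, 'Mehadia': 70},
    'Mehadia':       {'Lugoj': 70, 'Drobeta': 75},
    'Drobeta':       {'Mehadia': 75, 'Craiova': 120},
    'Craiova':       {'Drobeta': 120, 'RimnicuVilcea': 146, 'Pitesti': 138},
    'Sibiu':         {'Arad': 140, 'Oradea': 151, 'Fagaras': 99, 'RimnicuVilcea': 80},
    'RimnicuVilcea': {'Sibiu': 80, 'Craiova': 146, 'Pitesti': 97},
    'Fagaras':       {'Sibiu': 99, 'Bucharest': 211},
    'Pitesti':       {'RimnicuVilcea': 97, 'Craiova': 138, 'Bucharest': 101},
    'Bucharest':     {'Fagaras': 211, 'Pitesti': 101},
}
# dict_hn: heuristic h(n), straight-line distance to Bucharest in km
dict_hn = {'Arad': 366, 'Zerind': 374, 'Oradea': 380, 'Sibiu': 253,
           'Timisoara': 329, 'Lugoj': 244, 'Mehadia': 241, 'Drobeta': 242,
           'Craiova': 160, 'RimnicuVilcea': 193, 'Fagaras': 176,
           'Pitesti': 100, 'Bucharest': 0}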
OUTPUT: -
PRACTICAL 2
A) Implement the A* Search algorithm
CODE: -
import queue as Q
from RMP import dict_gn
from RMP import dict_hn

start = 'Arad'
goal = 'Bucharest'
result = ''

# f(n) = g(n) + h(n): cost of the path so far plus the heuristic of its last city
def get_fn(citystr):
    cities = citystr.split(" , ")
    hn = gn = 0
    for ctr in range(0, len(cities) - 1):
        gn = gn + dict_gn[cities[ctr]][cities[ctr + 1]]
    hn = dict_hn[cities[len(cities) - 1]]
    return hn + gn

# Repeatedly expand the path with the lowest f(n) until the goal is dequeued
def expand(cityq):
    global result
    tot, citystr, thiscity = cityq.get()
    if thiscity == goal:
        result = citystr + " : : " + str(tot)
        return
    for cty in dict_gn[thiscity]:
        cityq.put((get_fn(citystr + " , " + cty), citystr + " , " + cty, cty))
    expand(cityq)

def main():
    cityq = Q.PriorityQueue()
    thiscity = start
    cityq.put((get_fn(start), start, thiscity))
    expand(cityq)
    print("The A* path with the total is: ")
    print(result)

main()
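As a worked check against the sketched RMP values above: for the partial path "Arad , Sibiu", get_fn computes g(n) = 140 (the Arad-Sibiu road cost) and h(n) = 253 (Sibiu's straight-line distance to Bucharest), so f(n) = 140 + 253 = 393.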
OUTPUT: -
B) Implement the Recursive Best First Search algorithm for the same problem
CODE: -
import queue as Q
from RMP import dict_gn
from RMP import dict_hn

start = 'Arad'
goal = 'Bucharest'
result = ''

# f(n) = g(n) + h(n), computed over the comma-separated path string
def get_fn(citystr):
    cities = citystr.split(',')
    hn = gn = 0
    for ctr in range(0, len(cities) - 1):
        gn = gn + dict_gn[cities[ctr]][cities[ctr + 1]]
    hn = dict_hn[cities[len(cities) - 1]]
    return hn + gn

def printout(cityq):
    for i in range(0, cityq.qsize()):
        print(cityq.queue[i])

def expand(cityq):
    global result
    tot, citystr, thiscity = cityq.get()
    nexttot = 999  # f(n) of the second-best alternative (sentinel if none)
    if not cityq.empty():
        nexttot, nextcitystr, nextthiscity = cityq.queue[0]
    if thiscity == goal and tot < nexttot:
        result = citystr + '::' + str(tot)
        return
    print("Expanded city------------------------------", thiscity)
    print("Second best f(n)------------------------------", nexttot)
    tempq = Q.PriorityQueue()
    for cty in dict_gn[thiscity]:
        tempq.put((get_fn(citystr + ',' + cty), citystr + ',' + cty, cty))
    # Keep a successor while it beats the second-best f(n); otherwise back up,
    # re-inserting the parent path with the successor's f-value
    for ctr in range(1, 3):
        ctrtot, ctrcitystr, ctrthiscity = tempq.get()
        if ctrtot < nexttot:
            cityq.put((ctrtot, ctrcitystr, ctrthiscity))
        else:
            cityq.put((ctrtot, citystr, thiscity))
            break
    printout(cityq)
    expand(cityq)

def main():
    cityq = Q.PriorityQueue()
    thiscity = start
    cityq.put((999, "NA", "NA"))  # sentinel entry so a second-best always exists
    cityq.put((get_fn(start), start, thiscity))
    expand(cityq)
    print(result)

main()
OUTPUT: -
PRACTICAL 3
AIM: - Implement the decision tree learning algorithm to build a decision tree for a given dataset. Evaluate the accuracy and effectiveness of the decision tree on test data. Visualize and interpret the generated decision tree.
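For reference, the entropy criterion used below chooses, at each node, the split with the highest information gain. For a node $S$ with class proportions $p_i$ and a candidate attribute $A$:

$$H(S) = -\sum_i p_i \log_2 p_i, \qquad \mathrm{Gain}(S, A) = H(S) - \sum_{v \in \mathrm{Values}(A)} \frac{|S_v|}{|S|}\, H(S_v)$$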
CODE: -
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier, plot_tree
import matplotlib.pyplot as plt

# Function to import dataset
def importdata():
    # Importing dataset from file path
    balance_data = pd.read_csv("balanceScale.csv", header=None)
    # Printing dataset information
    print("Dataset Length: ", len(balance_data))
    print("Dataset Head:\n", balance_data.head())
    # Return the dataset
    return balance_data

# Function to split the dataset
def splitdataset(balance_data):
    # Separate the features (X) and target (Y)
    X = balance_data.iloc[:, 1:5].values
    Y = balance_data.iloc[:, 0].values
    # Split the dataset into training and testing sets (70% train, 30% test)
    X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.3, random_state=100)
    return X_train, X_test, y_train, y_test

# Function to train the decision tree using entropy criterion
def train_using_entropy(X_train, y_train):
    # Initialize Decision Tree with entropy criterion
    clf_entropy = DecisionTreeClassifier(criterion="entropy", random_state=100,
                                         max_depth=3, min_samples_leaf=5)
    # Train the model
    clf_entropy.fit(X_train, y_train)
    return clf_entropy

# Function to make predictions on the test set
def prediction(X_test, clf_object):
    # Predicting the labels for the test set
    y_pred = clf_object.predict(X_test)
    print("Predicted Values:\n", y_pred)
    return y_pred

# Function to calculate and print the accuracy and classification report
def cal_accuracy(y_test, y_pred):
    print("Accuracy: ", accuracy_score(y_test, y_pred) * 100)
    print("Confusion Matrix:\n", confusion_matrix(y_test, y_pred))
    print("Classification Report:\n", classification_report(y_test, y_pred))

# Function to visualize the decision tree
def visualize_tree(clf_object):
    plt.figure(figsize=(6, 4))
    plot_tree(clf_object, filled=True,
              feature_names=["Feature 1", "Feature 2", "Feature 3", "Feature 4"],
              class_names=["L", "B", "R"])
    plt.title("Decision Tree Visualization")
    plt.show()

# Main function
def main():
    # Step 1: Import the dataset
    data = importdata()
    # Step 2: Split the dataset into training and testing sets
    X_train, X_test, y_train, y_test = splitdataset(data)
    # Step 3: Train the decision tree using entropy criterion
    clf_entropy = train_using_entropy(X_train, y_train)
    # Step 4: Predict the test set results
    print("Results using entropy:")
    y_pred_entropy = prediction(X_test, clf_entropy)
    # Step 5: Calculate accuracy and display performance report
    cal_accuracy(y_test, y_pred_entropy)
    # Step 6: Visualize the decision tree
    visualize_tree(clf_entropy)

# Running the main function
if __name__ == "__main__":
    main()
OUTPUT: -
PRACTICAL 4
AIM: - Implement the Feed Forward Backpropagation algorithm to train a neural network
CODE: -
import numpy as np

class NeuralNetwork():
    def __init__(self):
        # seeding so the random weight initialization is reproducible
        np.random.seed(1)
        # initializing weights as a 3-by-1 matrix with values in [-1, 1)
        self.synaptic_weights = 2 * np.random.random((3, 1)) - 1

    def sigmoid(self, x):
        # applying the sigmoid activation function
        return 1 / (1 + np.exp(-x))

    def sigmoid_derivative(self, x):
        # derivative of the sigmoid, expressed in terms of its output x
        return x * (1 - x)

    def train(self, training_inputs, training_outputs, training_iterations):
        # training the model to make accurate predictions while adjusting weights
        for iteration in range(training_iterations):
            # siphon the training data via the neuron
            output = self.think(training_inputs)
            error = training_outputs - output
            # performing weight adjustments (one gradient step)
            adjustments = np.dot(training_inputs.T, error * self.sigmoid_derivative(output))
            self.synaptic_weights += adjustments

    def think(self, inputs):
        # passing the inputs via the neuron to get output
        # converting values to floats
        inputs = inputs.astype(float)
        output = self.sigmoid(np.dot(inputs, self.synaptic_weights))
        return output

if __name__ == "__main__":
    # initializing the neuron class
    neural_network = NeuralNetwork()
    print("Beginning randomly generated weights: ")
    print(neural_network.synaptic_weights)
    # training data consisting of 4 examples -- 3 inputs & 1 output
    training_inputs = np.array([[0, 0, 1], [1, 1, 1], [1, 0, 1], [0, 1, 1]])
    training_outputs = np.array([[0, 1, 1, 0]]).T
    # training taking place
    neural_network.train(training_inputs, training_outputs, 15000)
    print("Ending weights after training: ")
    print(neural_network.synaptic_weights)
    user_input_one = input("User Input One: ")
    user_input_two = input("User Input Two: ")
    user_input_three = input("User Input Three: ")
    print("Considering new situation: ", user_input_one, user_input_two, user_input_three)
    print("New output data: ")
    print(neural_network.think(np.array([user_input_one, user_input_two, user_input_three])))
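The adjustment computed in train() is one gradient-descent step on the squared error. With predictions $\hat{y} = \sigma(XW)$, the code's update is

$$\Delta W = X^{\top}\big((y - \hat{y}) \odot \hat{y} \odot (1 - \hat{y})\big)$$

where $\odot$ is elementwise multiplication and $\hat{y}(1 - \hat{y})$ is the sigmoid derivative written in terms of the output, exactly as sigmoid_derivative computes it.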
OUTPUT: -
PRACTICAL 5
AIM: - Implement the SVM algorithm for binary classification. Train an SVM model using a given dataset and optimize its parameters.
CODE: -
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
import seaborn as sns
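# --- Assumed data-loading and training steps (a minimal sketch; the original
# code for this part is not shown). load_breast_cancer and the parameter grid
# below are stand-ins, not the original dataset or settings. ---
from sklearn.datasets import load_breast_cancer

# Load a binary-classification dataset (assumed)
data = load_breast_cancer()
X, y = data.data, data.target

# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# Optimize SVM hyperparameters with a grid search (assumed grid)
param_grid = {'C': [0.1, 1, 10], 'kernel': ['linear', 'rbf'], 'gamma': ['scale', 'auto']}
grid = GridSearchCV(SVC(), param_grid, cv=5)
grid.fit(X_train, y_train)
print("Best parameters:", grid.best_params_)

# Predict on the test set with the tuned model
y_pred = grid.predict(X_test)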
# Calculating accuracy
accuracy = accuracy_score(y_test, y_pred)
print(f"Accuracy: {accuracy:.2f}")
# Confusion Matrix and Classification Report
print("Confusion Matrix:\n", confusion_matrix(y_test, y_pred))
print("Classification Report:\n", classification_report(y_test, y_pred))
OUTPUT: -
PRACTICAL 6
AIM: - Implement the AdaBoost ensemble learning algorithm for classification and evaluate its accuracy using cross-validation.
CODE: -
import pandas
from sklearn import model_selection
from sklearn.ensemble import AdaBoostClassifier

url = "https://raw.githubusercontent.com/jbrownlee/Datasets/master/pima-indians-diabetes.data.csv"
names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
dataframe = pandas.read_csv(url, names=names)
array = dataframe.values
X = array[:, 0:8]
Y = array[:, 8]
seed = 7
num_trees = 30
# KFold splits the dataset into n_splits folds for cross-validation.
# kfold = model_selection.KFold(n_splits=10, shuffle=True, random_state=seed)
# n_estimators: the number of trees to build before making predictions.
# More trees give better voting options and performance.
model = AdaBoostClassifier(n_estimators=num_trees, random_state=seed)
# cross_val_score calculates the model's cross-validated accuracy on X and Y.
# The cross-validator argument cv is optional (e.g. cv=kfold).
results = model_selection.cross_val_score(model, X, Y)
print(results.mean())
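For context on what AdaBoostClassifier is doing: each round $m$ fits a weak tree $h_m$ on reweighted samples. In the classic binary formulation (sklearn's SAMME variant generalizes it to several classes), a tree with weighted error $\varepsilon_m$ gets vote weight $\alpha_m$ and the sample weights are updated as

$$\alpha_m = \tfrac{1}{2}\ln\frac{1 - \varepsilon_m}{\varepsilon_m}, \qquad w_i \leftarrow \frac{w_i \exp\big(-\alpha_m\, y_i\, h_m(x_i)\big)}{Z_m}$$

with $y_i, h_m(x_i) \in \{-1, +1\}$ and $Z_m$ a normalizing constant, so misclassified samples gain weight in the next round.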
OUTPUT: -
0.7617774382480265
PRACTICAL 7
AIM: - Implement the Naïve Bayes algorithm for classification. Train a Naïve Bayes model using a given dataset and calculate class probabilities. Evaluate the accuracy of the model on test data and analyze the results.
CODE: -
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score, confusion_matrix,classification_report
import seaborn as sns
# Sample dataset: You can replace this with your actual dataset
data = {
'Alt': ['Y', 'N', 'Y', 'N', 'Y', 'N', 'Y', 'N', 'Y', 'N', 'Y', 'N'],
'Est': ['0-10', '10-30', '30-60', '>60', '0-10', '10-30', '30-60', '>60', '0-10', '10-30', '30-60', '>60'],
'Pat': ['S', 'N', 'F', 'S', 'N', 'F', 'S', 'N', 'F', 'S', 'N', 'F'],
'Type': ['F', 'I', 'B', 'T', 'F', 'I', 'B', 'T', 'F', 'I', 'B', 'T'],
'ans': ['Y', 'N', 'Y', 'N', 'Y', 'Y', 'N', 'N', 'Y', 'Y', 'N', 'N']
}
# Create DataFrame
df = pd.DataFrame(data)
# Convert categorical data to numerical
df_encoded = pd.get_dummies(df, drop_first=True)
# Features and target variable
X = df_encoded.drop('ans_Y', axis=1) # Features
y = df_encoded['ans_Y'] # Target variable (0: No, 1: Yes)
# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
# Initialize the Naive Bayes model
model = GaussianNB()
# Train the model
model.fit(X_train, y_train)
# Make predictions
y_pred = model.predict(X_test)
# Evaluate the model
accuracy = accuracy_score(y_test, y_pred)
print(f"Accuracy: {accuracy:.2f}")
# Confusion Matrix
conf_matrix = confusion_matrix(y_test, y_pred)
print("Confusion Matrix:")
print(conf_matrix)
# Classification Report
class_report = classification_report(y_test, y_pred)
print("Classification Report:")
print(class_report)
# Visualize the confusion matrix
plt.figure(figsize=(8, 6))
sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues', xticklabels=['No', 'Yes'], yticklabels=['No', 'Yes'])
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.title('Confusion Matrix')
plt.show()
# Visualize the results
sns.countplot(x='ans', data=df, palette='Set2', legend=False)
plt.title('Distribution of Waiting Responses')
plt.xlabel('Will Wait (Y/N)')
plt.ylabel('Count')
plt.show()
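The AIM also asks for class probabilities; GaussianNB exposes its posterior estimates through predict_proba. A small addition (not part of the original listing) that prints them:

# Posterior P(class | x) for each test sample; columns follow model.classes_
probs = model.predict_proba(X_test)
print("Class probabilities:\n", np.round(probs, 3))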
OUTPUT: -
PRACTICAL 8
AIM: - Implement the KNN algorithm for classification or regression. Apply the KNN algorithm to a given
dataset and predict the class or value for test data. Evaluate the accuracy or error of the predictions and
analyze the results.
CODE: -
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import roc_curve, roc_auc_score
plt.style.use("ggplot")
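# --- Assumed setup (a sketch; the pages defining the data are missing from this
# extract). sklearn's iris data is used as a stand-in multiclass dataset. ---
from sklearn.datasets import load_iris
from sklearn.preprocessing import label_binarize

iris = load_iris()
X, y = iris.data, iris.target
n_classes = 3
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42, stratify=y)

# Values of k to try, and arrays to record accuracy for each
neighbors = np.arange(1, 9)
train_accuracy = np.empty(len(neighbors))
test_accuracy = np.empty(len(neighbors))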
for i, k in enumerate(neighbors):
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X_train, y_train)
    # Record accuracy on both splits for this value of k
    train_accuracy[i] = knn.score(X_train, y_train)
    test_accuracy[i] = knn.score(X_test, y_test)
# Plotting accuracy
plt.title("KNN varying number of neighbors")
plt.plot(neighbors, test_accuracy, label="Test accuracy")
plt.plot(neighbors, train_accuracy, label="Train accuracy")
plt.legend()
plt.xlabel("Number of neighbors")
plt.ylabel("Accuracy")
plt.show()
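# --- Assumed ROC preparation (a sketch; not shown in this extract) ---
# One-vs-rest ROC curves need binarized labels and per-class probability scores.
knn = KNeighborsClassifier(n_neighbors=5)  # assumed final choice of k
knn.fit(X_train, y_train)
y_pred = knn.predict_proba(X_test)                  # shape (n_samples, n_classes)
y_test = label_binarize(y_test, classes=[0, 1, 2])  # one column per class
fpr, tpr, roc_auc = {}, {}, {}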
for i in range(n_classes):
    fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_pred[:, i])
    roc_auc[i] = roc_auc_score(y_test[:, i], y_pred[:, i])
OUTPUT: -