Practical No 1
Aim: Breadth First Search & Iterative Depth First Search
● Implement the Breadth First Search algorithm to solve a given problem.
● Implement the Iterative Depth First Search algorithm to solve the same problem.
● Compare the performance and efficiency of both algorithms.
dict_gn=dict(
Arad=dict(Zerind=75,Timisoara=118,Sibiu=140),
Bucharest=dict(Urziceni=85,Giurgiu=90,Pitesti=101,Fagaras=211),
Craiova=dict(Drobeta=120,Pitesti=138,Rimnicu=146),
Drobeta=dict(Mehadia=75,Craiova=120),
Eforie=dict(Hirsova=86),
Fagaras=dict(Sibiu=99,Bucharest=211),
Giurgiu=dict(Bucharest=90),
Hirsova=dict(Eforie=86,Urziceni=98),
Iasi=dict(Neamt=87,Vaslui=92),
Lugoj=dict(Mehadia=70,Timisoara=111),
Mehadia=dict(Lugoj=70,Drobeta=75),
Neamt=dict(Iasi=87),
Oradea=dict(Zerind=71,Sibiu=151),
Pitesti=dict(Rimnicu=97,Bucharest=101,Craiova=138),
Rimnicu=dict(Sibiu=80,Pitesti=97,Craiova=146),
Sibiu=dict(Rimnicu=80,Fagaras=99,Arad=140,Oradea=151),
Timisoara=dict(Lugoj=111,Arad=118),
Urziceni=dict(Bucharest=85,Hirsova=98,Vaslui=142),
Vaslui=dict(Iasi=92,Urziceni=142),
Zerind=dict(Oradea=71,Arad=75)
)
import queue as Q
#from RMP import dict_hn
start='Arad'
goal='Bucharest'
result=''
# BFS was missing from the listing; reconstructed to match the call in main().
def BFS(city, cityq, visitedq):
    global result
    if city==start:
        result=result+' '+city
    for eachcity in dict_gn[city].keys():
        if eachcity==goal:
            result=result+' '+eachcity
            return
        if eachcity not in result:
            result=result+' '+eachcity
            cityq.put(eachcity)
    BFS(cityq.get(), cityq, visitedq)
def main():
    cityq=Q.Queue()
    visitedq=Q.Queue()
    BFS(start, cityq, visitedq)
    print("BFS Traversal from", start, "to", goal, "is:")
    print(result)
main()
Output of BFS:
dict_gn=dict(
Arad=dict(Zerind=75,Timisoara=118,Sibiu=140),
Bucharest=dict(Urziceni=85,Giurgiu=90,Pitesti=101,Fagaras=211),
Craiova=dict(Drobeta=120,Pitesti=138,Rimnicu=146),
Drobeta=dict(Mehadia=75,Craiova=120),
Eforie=dict(Hirsova=86),
Fagaras=dict(Sibiu=99,Bucharest=211),
Giurgiu=dict(Bucharest=90),
Hirsova=dict(Eforie=86,Urziceni=98),
Iasi=dict(Neamt=87,Vaslui=92),
Lugoj=dict(Mehadia=70,Timisoara=111),
Mehadia=dict(Lugoj=70,Drobeta=75),
Neamt=dict(Iasi=87),
Oradea=dict(Zerind=71,Sibiu=151),
Pitesti=dict(Rimnicu=97,Bucharest=101,Craiova=138),
Rimnicu=dict(Sibiu=80,Pitesti=97,Craiova=146),
Sibiu=dict(Rimnicu=80,Fagaras=99,Arad=140,Oradea=151),
Timisoara=dict(Lugoj=111,Arad=118),
Urziceni=dict(Bucharest=85,Hirsova=98,Vaslui=142),
Vaslui=dict(Iasi=92,Urziceni=142),
Zerind=dict(Oradea=71,Arad=75)
)
start='Arad'
goal='Bucharest'
result=''
def DFS(city):
    global result
    stack = [city]
    visited = set()
    while stack:
        current_city = stack.pop()
        if current_city in visited:
            continue  # a city can be pushed twice before it is visited
        visited.add(current_city)
        result += current_city + ' '
        if current_city == goal:
            return
        neighbors = dict_gn[current_city]
        unvisited_neighbors = [neighbor for neighbor in neighbors if neighbor not in visited]
        stack.extend(unvisited_neighbors[::-1])
def main():
    DFS(start)
    print("DFS Traversal from", start, "to", goal, "is:")
    print(result)
main()
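To address the comparison bullet of the aim, a small timing harness (a sketch, assuming the BFS and DFS programs above are loaded in one session so both functions and the shared result string exist) can report wall-clock time for each search:
import time
def timed(run, label):
    # Reset the shared result string, run the search, and report wall-clock time.
    global result
    result = ''
    t0 = time.perf_counter()
    run()
    print("%s: %.3f ms, traversal:%s" % (label, (time.perf_counter() - t0) * 1000, result))
timed(lambda: BFS(start, Q.Queue(), Q.Queue()), "BFS")
timed(lambda: DFS(start), "DFS")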
Output:
Practical No 2
Aim: A* Search and Recursive Best-First Search
● Implement the A* Search algorithm for solving a pathfinding problem.
● Implement the Recursive Best-First Search algorithm for the same problem.
● Compare the performance and effectiveness of both algorithms.
dict_gn=dict(
Arad=dict(Zerind=75,Timisoara=118,Sibiu=140),
Bucharest=dict(Urziceni=85,Giurgiu=90,Pitesti=101,Fagaras=211),
Craiova=dict(Drobeta=120,Pitesti=138,Rimnicu=146),
Drobeta=dict(Mehadia=75,Craiova=120),
Eforie=dict(Hirsova=86),
Fagaras=dict(Sibiu=99,Bucharest=211),
Giurgiu=dict(Bucharest=90),
Hirsova=dict(Eforie=86,Urziceni=98),
Iasi=dict(Neamt=87,Vaslui=92),
Lugoj=dict(Mehadia=70,Timisoara=111),
Mehadia=dict(Lugoj=70,Drobeta=75),
Neamt=dict(Iasi=87),
Oradea=dict(Zerind=71,Sibiu=151),
Pitesti=dict(Rimnicu=97,Bucharest=101,Craiova=138),
Rimnicu=dict(Sibiu=80,Pitesti=97,Craiova=146),
Sibiu=dict(Rimnicu=80,Fagaras=99,Arad=140,Oradea=151),
Timisoara=dict(Lugoj=111,Arad=118),
Urziceni=dict(Bucharest=85,Hirsova=98,Vaslui=142),
Vaslui=dict(Iasi=92,Urziceni=142),
Zerind=dict(Oradea=71,Arad=75)
)
import queue as Q
#from RMP import dict_gn
#from RMP import dict_hn
# Straight-line distances to Bucharest (the AIMA heuristic values); the
# original listing relied on the commented-out RMP import for this table.
dict_hn={'Arad':366,'Bucharest':0,'Craiova':160,'Drobeta':242,'Eforie':161,
'Fagaras':176,'Giurgiu':77,'Hirsova':151,'Iasi':226,'Lugoj':244,'Mehadia':241,
'Neamt':234,'Oradea':380,'Pitesti':100,'Rimnicu':193,'Sibiu':253,'Timisoara':329,
'Urziceni':80,'Vaslui':199,'Zerind':374}
start='Arad'
goal='Bucharest'
result=''
def get_fn(citystr):
    cities=citystr.split(" , ")
    hn=gn=0
    for ctr in range(0, len(cities)-1):
        gn=gn+dict_gn[cities[ctr]][cities[ctr+1]]
    hn=dict_hn[cities[len(cities)-1]]
    return(hn+gn)
def expand(cityq):
    global result
    tot, citystr, thiscity=cityq.get()
    if thiscity==goal:
        result=citystr+" : : "+str(tot)
        return
    for cty in dict_gn[thiscity]:
        cityq.put((get_fn(citystr+" , "+cty), citystr+" , "+cty, cty))
    expand(cityq)
def main():
    cityq=Q.PriorityQueue()
    thiscity=start
    cityq.put((get_fn(start),start,thiscity))
    expand(cityq)
    print("The A* path with the total is: ")
    print(result)
main()
Output of A* algorithm:
dict_gn=dict(
Arad=dict(Zerind=75,Timisoara=118,Sibiu=140),
Bucharest=dict(Urziceni=85,Giurgiu=90,Pitesti=101,Fagaras=211),
Craiova=dict(Drobeta=120,Pitesti=138,Rimnicu=146),
Drobeta=dict(Mehadia=75,Craiova=120),
Eforie=dict(Hirsova=86),
Fagaras=dict(Sibiu=99,Bucharest=211),
Giurgiu=dict(Bucharest=90),
Hirsova=dict(Eforie=86,Urziceni=98),
Iasi=dict(Neamt=87,Vaslui=92),
Lugoj=dict(Mehadia=70,Timisoara=111),
Mehadia=dict(Lugoj=70,Drobeta=75),
Neamt=dict(Iasi=87),
Oradea=dict(Zerind=71,Sibiu=151),
Pitesti=dict(Rimnicu=97,Bucharest=101,Craiova=138),
Rimnicu=dict(Sibiu=80,Pitesti=97,Craiova=146),
Sibiu=dict(Rimnicu=80,Fagaras=99,Arad=140,Oradea=151),
Timisoara=dict(Lugoj=111,Arad=118),
Urziceni=dict(Bucharest=85,Hirsova=98,Vaslui=142),
Vaslui=dict(Iasi=92,Urziceni=142),
Zerind=dict(Oradea=71,Arad=75)
)
start = 'Arad'
goal = 'Bucharest'
result = []
# Straight-line distances to Bucharest (the AIMA heuristic values); the
# original listing used dict_hn without defining it.
dict_hn={'Arad':366,'Bucharest':0,'Craiova':160,'Drobeta':242,'Eforie':161,
'Fagaras':176,'Giurgiu':77,'Hirsova':151,'Iasi':226,'Lugoj':244,'Mehadia':241,
'Neamt':234,'Oradea':380,'Pitesti':100,'Rimnicu':193,'Sibiu':253,'Timisoara':329,
'Urziceni':80,'Vaslui':199,'Zerind':374}
# Only fragments of RBFS survived in the original listing; below is a runnable
# simplified reconstruction that keeps the surviving lines' intent
# (per-step f = max(step cost, h); abandon a branch once best_f exceeds f_limit).
def RBFS(city, f_limit, visited=None):
    if visited is None:
        visited = set()
    visited.add(city)
    result.append(city)
    if city == goal:
        return True
    successors = []
    for next_city in dict_gn[city].keys():
        if next_city in visited:
            continue
        cost = dict_gn[city][next_city]
        h_cost = dict_hn[next_city]
        successors.append((next_city, max(cost, h_cost)))
    successors.sort(key=lambda s: s[1])
    while successors:
        best, best_f = successors[0]
        if best_f > f_limit:
            break
        # Recurse with the next-best sibling's f-value as the new limit
        alternative = successors[1][1] if len(successors) > 1 else f_limit
        if RBFS(best, min(f_limit, alternative), visited):
            return True
        successors.pop(0)
    result.pop()  # backtrack: this city is not on the path found
    return False
def main():
    if RBFS(start, float('inf')):
        print("RBFS path from", start, "to", goal, "is:")
        print(' -> '.join(result))
    else:
        print("No path found from", start, "to", goal)
main()
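The aim's last bullet asks for a comparison of the two algorithms; a minimal timing check (a sketch to run at the end of each listing, since the A* and RBFS programs each define their own main()) is:
import time
start_time = time.perf_counter()
main()  # times the search together with printing its result
print("elapsed: %.3f ms" % ((time.perf_counter() - start_time) * 1000))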
Practical No 3
Aim: Decision Tree Learning
Code:
#numpy and pandas initialization
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
# Load the PlayTennis dataset (file name assumed; the original listing omitted this line)
PlayTennis = pd.read_csv("PlayTennis.csv")
# Check the column names to ensure they match your CSV file
print(PlayTennis.columns)
Le = LabelEncoder()
# Strip leading and trailing spaces from column names and then encode
PlayTennis.columns = PlayTennis.columns.str.strip()
PlayTennis['outlook'] = Le.fit_transform(PlayTennis['outlook'])
PlayTennis['temp'] = Le.fit_transform(PlayTennis['temp'])
PlayTennis['humidity'] = Le.fit_transform(PlayTennis['humidity'])
PlayTennis['windy'] = Le.fit_transform(PlayTennis['windy'])
PlayTennis['play'] = Le.fit_transform(PlayTennis['play'])
print(PlayTennis)
y = PlayTennis['play']
x = PlayTennis.drop(['play'], axis=1)
import graphviz
from sklearn import tree
# Fit the tree and export it to DOT format (the original listing omitted these
# steps; criterion='entropy' is assumed, matching ID3-style splits)
clf = tree.DecisionTreeClassifier(criterion='entropy')
clf = clf.fit(x, y)
dot_data = tree.export_graphviz(clf, out_file=None)
graph = graphviz.Source(dot_data)
graph.render("decision_tree") #save the graph as "decision_tree.pdf"
x_pred = clf.predict(x)
print(x_pred == y)
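For an overall figure rather than an element-wise comparison, the training accuracy can be reported in one line (a sketch reusing the x_pred and y defined above):
from sklearn.metrics import accuracy_score
print("Training accuracy:", accuracy_score(y, x_pred))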
Output:
Practical No 4
Aim: Feed Forward Backpropagation Neural Network
● Implement the Feed Forward Backpropagation algorithm to train a neural
network.
● Use a given dataset to train the neural network for a specific task.
● Evaluate the performance of the trained network on test data.
Code:
import numpy as np
class NeuralNetwork():
    def __init__(self):
        np.random.seed(1)
        self.synaptic_weights = 2 * np.random.random((3, 1)) - 1
    # The original listing omitted the methods below; they are the standard
    # sigmoid feed-forward / backpropagation steps the __main__ block calls.
    def sigmoid(self, x):
        return 1 / (1 + np.exp(-x))
    def sigmoid_derivative(self, x):
        # x is already a sigmoid output, so this is s * (1 - s)
        return x * (1 - x)
    def train(self, training_inputs, training_outputs, training_iterations):
        for iteration in range(training_iterations):
            output = self.think(training_inputs)
            error = training_outputs - output
            adjustments = np.dot(training_inputs.T, error * self.sigmoid_derivative(output))
            self.synaptic_weights += adjustments
    def think(self, inputs):
        inputs = inputs.astype(float)
        return self.sigmoid(np.dot(inputs, self.synaptic_weights))
if __name__ == "__main__":
    neural_network = NeuralNetwork()
    print("Beginning Randomly Generated Weights:")
    print(neural_network.synaptic_weights)
    training_inputs = np.array([[0, 0, 1],
                                [1, 1, 1],
                                [1, 0, 1],
                                [0, 1, 1]])
    training_outputs = np.array([[0, 1, 1, 0]]).T
    neural_network.train(training_inputs, training_outputs, 15000)
    print("Ending Weights After Training:")
    print(neural_network.synaptic_weights)
    user_input_one = float(input("User Input One: "))
    user_input_two = float(input("User Input Two: "))
    user_input_three = float(input("User Input Three: "))
    print("Considering New Situation:", user_input_one, user_input_two, user_input_three)
    print("New Output data:")
    print(neural_network.think(np.array([user_input_one, user_input_two, user_input_three])))
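The last bullet asks for an evaluation on unseen data; a minimal check to run after training (a sketch, using input patterns outside the four training rows; the learned rule is that the output follows the first input column) could be:
test_inputs = np.array([[1, 1, 0],
                        [0, 0, 0]])
expected = np.array([[1, 0]]).T  # expected: output equals the first input column
predictions = neural_network.think(test_inputs)
print("Test predictions:", predictions.ravel())
print("Rounded vs expected:", np.round(predictions).ravel(), expected.ravel())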
Output:
Practical No 5
Aim: Support Vector Machines (SVM)
● Implement the SVM algorithm for binary classification.
● Train an SVM model using a given dataset and optimize its parameters.
● Evaluate the performance of the SVM model on test data and analyze the results.
Code:
from warnings import filterwarnings
filterwarnings("ignore")
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
import statsmodels.formula.api as smf
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split,cross_val_score,cross_val_predict
from sklearn.decomposition import PCA
from sklearn.tree import DecisionTreeClassifier
from sklearn.preprocessing import scale
from sklearn import model_selection
from sklearn.metrics import roc_auc_score,roc_curve
from sklearn import preprocessing
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix,accuracy_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier, BaseEnsemble, GradientBoostingClassifier, GradientBoostingRegressor
from sklearn.svm import SVC, LinearSVC
import time
from matplotlib.colors import ListedColormap
from xgboost import XGBRegressor
from skompiler import skompile
from lightgbm import LGBMRegressor
pd.set_option('display.max_rows',1000)
pd.set_option('display.max_columns',1000)
pd.set_option('display.width',1000)
df = pd.read_csv("C:/faraz/sem5/AI/dibates.csv")
print(df.head())
print(df.shape)
print(df.describe())
x = df.drop("Outcome",axis=1)
y = df["Outcome"] # We will predict Outcome(diabetes)
x_train = x.iloc[:600]
x_test = x.iloc[600:]
y_train = y[:600]
y_test = y[600:]
support_vector_classifier = SVC(kernel="linear").fit(x_train,y_train)
print(support_vector_classifier)  # SVC(kernel='linear')
# Default C
print("Default C:", support_vector_classifier.C)
y_pred = support_vector_classifier.predict(x_test)
cm = confusion_matrix(y_test,y_pred)
print(cm)
print("Our Accuracy is: ", (cm[0][0]+cm[1][1])/(cm[0][0]+cm[1][1]+cm[0][1]+cm[1][0]))
print("accuracy_score:", accuracy_score(y_test,y_pred))
print(classification_report(y_test,y_pred))
accuracies = cross_val_score(estimator=support_vector_classifier, X=x_train, y=y_train, cv=10)
print("Mean CV accuracy:", accuracies.mean())
print("First 10 test predictions:", support_vector_classifier.predict(x_test)[:10])
# Example output: [0 0 0 1 1 0 1 0 1 0]
svm_params = {"C":np.arange(1,20)}
svm = SVC(kernel='linear')
svm_cv = GridSearchCV(svm,svm_params,cv=8)
start_time= time.time()
svm_cv.fit(x_train,y_train)
elapsed_time = time.time() - start_time
print("Grid search took %.2f seconds" % elapsed_time)
#best parameters
print("Best parameters:", svm_cv.best_params_)  # e.g. {'C': 2}
svm_tuned = SVC(kernel='linear',C = 2).fit(x_train,y_train)
print(svm_tuned)  # SVC(C=2, kernel='linear')
y_pred = svm_tuned.predict(x_test)
cm = confusion_matrix(y_test,y_pred)
print(cm)
print("our Accurarcy is:", (cm[0][0]+cm[1][1])/(cm[0][0]+cm[1][1]+cm[0][1]+cm[1][0]))
accuracy_score(y_test,y_pred)
print(classification_report(y_test,y_pred))
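roc_auc_score and roc_curve are imported above but never used; a short ROC analysis of the tuned model (a sketch, reusing svm_tuned and the test split) would address the "analyze the results" bullet:
svm_scores = svm_tuned.decision_function(x_test)  # signed margin scores
fpr, tpr, thresholds = roc_curve(y_test, svm_scores)
print("Test ROC AUC:", roc_auc_score(y_test, svm_scores))
plt.plot(fpr, tpr)
plt.xlabel("False positive rate")
plt.ylabel("True positive rate")
plt.title("ROC curve - tuned linear SVM")
plt.show()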
Output:
Practical No 6
Aim: Adaboost Ensemble Learning
● Implement the Adaboost algorithm to create an ensemble of weak classifiers.
● Train the ensemble model on a given dataset and evaluate its performance.
● Compare the results with individual weak classifiers.
Code:
import pandas
from sklearn import model_selection
from sklearn.ensemble import AdaBoostClassifier
url = "https://raw.githubusercontent.com/jbrownlee/Datasets/master/pima-indians-diabetes.data.csv"
names = ['preg','plas','pres','skin','test','mass','pedi','age','class']
dataframe = pandas.read_csv(url,names=names)
array = dataframe.values
X = array[:,0:8]
Y = array[:,8]
seed = 7
num_trees = 30
#KFold splits the data into n_splits folds for cross-validation.
#kfold = model_selection.KFold(n_splits=10, shuffle=True, random_state=seed)
#n_estimators: the number of trees to build before taking the majority vote.
#A higher number of trees gives more voting options and usually better performance.
model = AdaBoostClassifier(n_estimators=num_trees,random_state=seed)
#cross_val_score evaluates the model with cross-validation on X and Y.
#The cv argument is optional, e.g. cv=kfold.
results = model_selection.cross_val_score(model,X,Y)
print(results.mean())
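The last bullet asks to compare the ensemble with individual weak classifiers; a minimal check (a sketch, scoring a single depth-1 decision stump, the default AdaBoost base learner, on the same data) could be:
from sklearn.tree import DecisionTreeClassifier
stump = DecisionTreeClassifier(max_depth=1, random_state=seed)
stump_results = model_selection.cross_val_score(stump, X, Y)
print("Single decision stump:", stump_results.mean())
print("AdaBoost ensemble:", results.mean())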
Output:
Practical No 7
Aim: Naive Bayes' Classifier
● Implement the Naive Bayes' algorithm for classification.
● Train a Naive Bayes' model using a given dataset and calculate class
probabilities.
● Evaluate the accuracy of the model on test data and analyze the results.
Code:
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB, CategoricalNB, GaussianNB
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import classification_report, accuracy_score, confusion_matrix, precision_score, recall_score, f1_score
df = pd.read_csv("D:/tycs/AI/heart.csv")
print(df.head(11))
print(df.tail())
print(df.info())
le=LabelEncoder()
df['Sex']=le.fit_transform(df['Sex'])
df['ChestPainType']=le.fit_transform(df['ChestPainType'])
df['RestingECG']=le.fit_transform(df['RestingECG'])
df['MaxHR']=le.fit_transform(df['MaxHR'])
df['ExerciseAngina']=le.fit_transform(df['ExerciseAngina'])
df['Diagnosis']=le.fit_transform(df['Diagnosis'])
print(df.info())
print(df.head(11))
#setting the dimensions of the plot
fig,ax=plt.subplots(figsize=(6,6))
sns.countplot(x=df['Sex'],data=df)
plt.title("Category wise count of 'Sex'")
plt.xlabel("category")
plt.ylabel("Count")
plt.show()
fig,ax=plt.subplots(figsize=(6,6))
sns.countplot(x=df['ChestPainType'],data=df)
plt.title("Category wise count of 'ChestPainType'")
plt.xlabel("category")
plt.ylabel("Count")
plt.show()
fig,ax=plt.subplots(figsize=(6,6))
sns.countplot(x=df["RestingECG"],data=df)
plt.title("Category wise count of 'RestingECG'")
plt.xlabel("category")
plt.ylabel("Count")
plt.show()
fig,ax=plt.subplots(figsize=(6,6))
sns.countplot(x=df['Diagnosis'],data=df)
plt.title("Category wise count of 'Diagnosis'")
plt.xlabel("category")
plt.ylabel("Count")
plt.show()
X = df.drop('Diagnosis',axis=1)
y = df['Diagnosis']
# Split first so the model is evaluated on data it was not trained on
# (the original fitted on the full dataset before splitting).
x_train,x_test,y_train,y_test = train_test_split(X,y,test_size=0.2)
# Three Naive Bayes variants; the last one fitted (GaussianNB) is evaluated below.
classifier = MultinomialNB()
classifier.fit(x_train,y_train)
classifier = CategoricalNB()
classifier.fit(x_train,y_train)
classifier = GaussianNB()
classifier.fit(x_train,y_train)
y_pred = classifier.predict(x_test)
print("Confusion matrix",confusion_matrix(y_test,y_pred))
print("Accuracy:",accuracy_score(y_test,y_pred))
print("Recall:",recall_score(y_test,y_pred))
print("F1 score:",f1_score(y_test,y_pred))
print("classification report:",classification_report(y_test,y_pred))
Output:
Practical No 8
Aim: K-Nearest Neighbors (K-NN)
● Implement the K-NN algorithm for classification or regression.
● Apply the K-NN algorithm to a given dataset and predict the class or value for
test data.
● Evaluate the accuracy or error of the predictions and analyze the results.
Code:
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix, classification_report
df = pd.read_csv('C:/tycs/AI/dibates.csv')
print(df.head())
print(df.shape)
print(df.dtypes)
x = df.drop('Outcome', axis=1).values
y = df['Outcome'].values
# Train/test split and the k range to evaluate (the original listing omitted
# these setup lines; the values below are assumptions).
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42, stratify=y)
neighbors = np.arange(1, 9)
train_accuracy = np.empty(len(neighbors))
test_accuracy = np.empty(len(neighbors))
for i, k in enumerate(neighbors):
    # Set up a knn classifier with k neighbors
    knn = KNeighborsClassifier(n_neighbors=k)
    # Fit the model
    knn.fit(x_train, y_train)
    # Compute accuracy on the training set
    train_accuracy[i] = knn.score(x_train, y_train)
    # Compute accuracy on the test set
    test_accuracy[i] = knn.score(x_test, y_test)
# Evaluate the last fitted model on the test set (y_pred was missing from the listing)
y_pred = knn.predict(x_test)
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
y_pred_proba = knn.predict_proba(x_test)[:, 1]
# Hyper-parameter tuning over k (param_grid was missing from the listing; range assumed)
param_grid = {'n_neighbors': np.arange(1, 50)}
knn = KNeighborsClassifier()
knn_cv = GridSearchCV(knn, param_grid, cv=5)
knn_cv.fit(x_train, y_train)
print(knn_cv.best_score_)
print(knn_cv.best_params_)
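The train_accuracy and test_accuracy arrays filled in the loop above are never visualized; a short plot (a sketch, assuming matplotlib is available) shows how k trades off over- and under-fitting:
import matplotlib.pyplot as plt
plt.plot(neighbors, train_accuracy, label='Training accuracy')
plt.plot(neighbors, test_accuracy, label='Testing accuracy')
plt.xlabel('Number of neighbors (k)')
plt.ylabel('Accuracy')
plt.title('k-NN: varying number of neighbors')
plt.legend()
plt.show()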
Practical No 9
Aim: Association Rule Mining
● Implement the Association Rule Mining algorithm (e.g., Apriori) to find frequent
itemsets.
● Generate association rules from the frequent itemsets and calculate their support
and confidence.
● Interpret and analyze the discovered association rules.
Code:
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
# Load the groceries transactions dataset (file name assumed; the original omitted this line)
df = pd.read_csv("Groceries_dataset.csv")
print(df.isnull().any())
all_products = df['itemDescription'].unique()
print("Total products: {}".format(len(all_products)))
def distribution_plot(x, y, name=None, xaxis=None, yaxis=None):
    fig = go.Figure([
        go.Bar(x=x, y=y)
    ])
    fig.update_layout(
        title_text=name,
        xaxis_title=xaxis,
        yaxis_title=yaxis
    )
    fig.show()
x = df['itemDescription'].value_counts()
x = x.sort_values(ascending=False)
x = x[:10]
# Plot the ten most frequent items (the original computed x but never plotted it)
distribution_plot(x=x.index, y=x.values, name="Top 10 items", xaxis="Item", yaxis="Count")
one_hot =pd.get_dummies(df['itemDescription'])
df.drop('itemDescription',inplace=True, axis=1)
df = df.join(one_hot)
print(df.head())
records = df.groupby(["Member_number","Date"])[all_products[:]].apply(sum)
records = records.reset_index()[all_products]
def get_Pnames(x):
    # Replace positive counts with the product name so each row becomes an item list
    for product in all_products:
        if x[product] > 0:
            x[product] = product
    return x
records = records.apply(get_Pnames, axis=1)
print(records.head())
print("total transactions:{}".format(len(records)))
x = records.values
# Keep only the product names in each row, dropping the zero entries
x = [sub[~(sub == 0)].tolist() for sub in x if sub[sub != 0].tolist()]
transactions = x
print(transactions[0:10])
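The listing stops after building the transaction list, but the aim asks for frequent itemsets and rules; a minimal continuation (a sketch, assuming the apyori package is installed; the thresholds are assumptions to tune for the dataset size) would be:
from apyori import apriori
rules = apriori(transactions, min_support=0.002, min_confidence=0.05, min_lift=1.2)
for rule in list(rules)[:5]:
    stat = rule.ordered_statistics[0]
    print("Rule:", list(stat.items_base), "->", list(stat.items_add),
          "| support=%.4f confidence=%.3f lift=%.3f"
          % (rule.support, stat.confidence, stat.lift))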
Output:
Practical No 10
Aim: Demo of OpenAI/TensorFlow Tools
● Explore and experiment with OpenAI or TensorFlow tools and libraries.
● Perform a demonstration or mini-project showcasing the capabilities of the tools.
● Discuss and present the findings and potential applications.
Code:
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input, decode_predictions
import numpy as np
import matplotlib.pyplot as plt
model = MobileNetV2(weights='imagenet')
img_path = "D:/Downloads/wallpaperflare.com_wallpaper.jpg"
img = image.load_img(img_path, target_size=(224, 224))
img_array = image.img_to_array(img)
img_array = np.expand_dims(img_array, axis=0)  # the model expects a batch dimension
img_array = preprocess_input(img_array)
# Classify and show the result (the original listing stopped before this step)
predictions = model.predict(img_array)
print(decode_predictions(predictions, top=3)[0])
plt.imshow(img)
plt.axis('off')
plt.show()
Output: