Py Lab Programs
1.bfs
graph={
'A':['B','C'],
'B':['D','E'],
'C':['F'],
'D':[],
'E':['F'],
'F':[]
}
visited=[]
queue=[]
def bfs(visited, graph, node):
    visited.append(node)
    queue.append(node)
    while queue:
        S = queue.pop(0)          # dequeue the next node
        print(S, end=" ")
        for neighbour in graph[S]:
            if neighbour not in visited:
                visited.append(neighbour)
                queue.append(neighbour)
bfs(visited,graph,'A')
output:-
A B C D E F
2.dfs
graph = {
'A': ['B', 'C'],
'B': ['D', 'E'],
'C': ['F'],
'D': [],
'E': ['F'],
'F': []
}
visited = set()
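The traversal routine itself is missing from the listing; a minimal recursive DFS, consistent with the BFS program above, would be:

def dfs(visited, graph, node):
    if node not in visited:
        print(node, end=" ")
        visited.add(node)
        for neighbour in graph[node]:
            dfs(visited, graph, neighbour)

dfs(visited, graph, 'A')   # with the graph above this sketch prints: A B D E F C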
output:-
3.8-puzzle
import copy
from heapq import heappush, heappop
n=3
row = [1, 0, -1, 0]
col = [0, -1, 0, 1]
class priorityQueue:
    def __init__(self):
        self.heap = []

    # insert a node (the fragment below calls pq.push(child))
    def push(self, k):
        heappush(self.heap, k)

    def pop(self):
        return heappop(self.heap)

    def empty(self):
        return not self.heap
class node:
    def __init__(self, parent, mat, empty_tile_pos, cost, level):
        self.parent = parent
        self.mat = mat
        self.empty_tile_pos = empty_tile_pos
        self.cost = cost
        self.level = level

    # heappush/heappop order nodes by cost
    def __lt__(self, nxt):
        return self.cost < nxt.cost
def printMatrix(mat):
    for i in range(n):
        for j in range(n):
            print("%d " % (mat[i][j]), end=" ")
        print()

def printPath(root):
    if root is None:
        return
    printPath(root.parent)
    printMatrix(root.mat)
    print()
        # inside the solve loop: try each of the four moves (the row/col offsets above)
        # and expand the minimum-cost node when the new blank position is legal
        if isSafe(new_tile_pos[0], new_tile_pos[1]):
            child = newNode(minimum.mat,
                            minimum.empty_tile_pos,
                            new_tile_pos,
                            minimum.level + 1,
                            minimum, final)
            pq.push(child)
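The listing stops before the helpers this fragment relies on (the stray "return count" at the end of the node class was the tail of the cost function, and the solve driver itself is omitted). A minimal sketch of the missing pieces, following the usual branch-and-bound formulation, is:

def isSafe(x, y):
    # a move is legal only while the blank tile stays on the board
    return 0 <= x < n and 0 <= y < n

def calculateCost(mat, final):
    # heuristic: count of non-blank tiles not yet in their final position
    count = 0
    for i in range(n):
        for j in range(n):
            if mat[i][j] and mat[i][j] != final[i][j]:
                count += 1
    return count

def newNode(mat, empty_tile_pos, new_empty_tile_pos, level, parent, final):
    new_mat = copy.deepcopy(mat)
    # slide the tile into the old blank position
    x1, y1 = empty_tile_pos
    x2, y2 = new_empty_tile_pos
    new_mat[x1][y1], new_mat[x2][y2] = new_mat[x2][y2], new_mat[x1][y1]
    cost = calculateCost(new_mat, final)
    return node(parent, new_mat, new_empty_tile_pos, cost, level)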
output:-
1 2 3
5 6 0
7 8 4
1 2 3
5 0 6
7 8 4
1 2 3
5 8 6
7 0 4
1 2 3
5 8 6
0 7 4
4.n-queens
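No code survives for this program; a minimal backtracking sketch that prints the board in the same 0/1 format as the output below (the particular solution found depends on the order in which columns are tried) is:

N = 8

def is_safe(board, row, col):
    # board[r] holds the column of the queen placed in row r
    for r in range(row):
        if board[r] == col or abs(board[r] - col) == row - r:
            return False
    return True

def solve(board, row):
    if row == N:
        return True
    for col in range(N):
        if is_safe(board, row, col):
            board[row] = col
            if solve(board, row + 1):
                return True
    return False

board = [-1] * N
solve(board, 0)
print([[1 if board[r] == c else 0 for c in range(N)] for r in range(N)])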
output:-
[[1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1,
0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0]]
5.alpha-beta
    if maximizingPlayer:
        best = MIN
        return best
    else:
        best = MAX
        return best

if __name__ == "__main__":
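Only the entry and exit of each branch survive above; a complete alpha-beta minimax in the usual textbook form (the depth-3 binary game tree and the leaf values here are assumptions) is:

MAX, MIN = 1000, -1000

def minimax(depth, nodeIndex, maximizingPlayer, values, alpha, beta):
    if depth == 3:                      # leaf level of the assumed tree
        return values[nodeIndex]
    if maximizingPlayer:
        best = MIN
        for i in range(2):
            val = minimax(depth + 1, nodeIndex * 2 + i, False, values, alpha, beta)
            best = max(best, val)
            alpha = max(alpha, best)
            if beta <= alpha:           # prune the remaining children
                break
        return best
    else:
        best = MAX
        for i in range(2):
            val = minimax(depth + 1, nodeIndex * 2 + i, True, values, alpha, beta)
            best = min(best, val)
            beta = min(beta, best)
            if beta <= alpha:
                break
        return best

if __name__ == "__main__":
    values = [3, 5, 6, 9, 1, 2, 0, -1]  # assumed leaf values
    print("The optimal value is:", minimax(0, 0, True, values, MIN, MAX))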
output:-
6.forward-chaining
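The program itself is not in the listing; a minimal sketch reconstructed from the transcript below (the rule base and prompts are assumptions, and the follow-up "Select Option" colour step is omitted) is:

# forward chaining: start from an observed fact and fire rules toward a conclusion
def main():
    print("*-----Forward--Chaining-----*")
    print("X is\n1..Croaks\n2.Eat Flies\n3.shrimps\n4.Sings")
    x = int(input("Select One "))
    if x in (1, 2):                     # croaks / eats flies -> frog
        print("Chance Of Frog")
        print("X is", "Croaks" if x == 1 else "Eat Flies")
    elif x in (3, 4):                   # shrimps / sings -> canary
        print("Chance Of Canary")
        print("X is", "shrimps" if x == 3 else "Sings")
    else:
        print("Invalid option")

main()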
output:-
*-----Forward--Chaining-----*
X is
1..Croaks
2.Eat Flies
3.shrimps
4.Sings
Select One 1
Chance Of Frog
X is Croaks
Select Option 1
7.backward-chaining
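Again only the transcript survives; a minimal sketch that works from the goal back to a supporting fact (prompts reconstructed from the output below) is:

# backward chaining: start from a hypothesis and check the facts that support it
def main():
    print("*-----Backward--Chaining-----*")
    print("X is\n1.frog\n2.Canary")
    x = int(input("Select One "))
    if x == 1:                          # goal frog, supported by croaking
        print("Chance of Croaks")
        print("X is Croaks")
    elif x == 2:                        # goal canary, supported by its feeding habit
        print("Chance of shrimpig")
        print("X is Eat Flies")
    else:
        print("Invalid option")

main()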
output:-
*-----Backward--Chaining-----*
X is
1.frog
2.Canary
Select One 2
Chance of shrimpig
X is Eat Flies
8.knn
import pandas as pd
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler

# file_path is not defined in the listing; the Iris CSV used in the Naive Bayes
# program is assumed here, along with an assumed 50/50 train/test split
file_path = "C:\\Users\\mcom13\\Downloads\\Iris (1).csv"
iris_data = pd.read_csv(file_path)
X = iris_data.iloc[:, :-1].values
y = iris_data.iloc[:, -1].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=42)

scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
print("Accuracy:", accuracy_score(y_test, y_pred))
output:-
Accuracy: 0.9866666666666667
9.linear-regression
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
np.random.seed(0)
X = 2 * np.random.rand(100, 1)
y = 4 + 3 * X + np.random.rand(100, 1)
model = LinearRegression()
model.fit(X, y)
X_new = np.array([[0], [2]])
y_pred = model.predict(X_new)
plt.scatter(X, y, color='blue')
plt.plot(X_new, y_pred, color='red')
plt.xlabel('X')
plt.ylabel('y')
plt.title('Linear Regression')
plt.show()
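The fitted parameters can also be read off the model; with the data generated above the slope comes out near 3 and the intercept near 4.5 (the extra 0.5 is the mean of the uniform noise):

print("intercept:", model.intercept_)   # about 4.5
print("slope:", model.coef_)            # about 3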
output:-
10.NB
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score, confusion_matrix,classification_report
file_path = "C:\\Users\\mcom13\\Downloads\\Iris (1).csv"
iris_data = pd.read_csv(file_path)
X = iris_data.iloc[:, :-1].values
y = iris_data.iloc[:, -1].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
print(len(y_train))
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
naive_bayes = GaussianNB()
naive_bayes.fit(X_train, y_train)
y_pred = naive_bayes.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy:", accuracy)
conf_matrix = confusion_matrix(y_test, y_pred)
print("Confusion Matrix:")
print(conf_matrix)
print(classification_report(y_test,y_pred))
output:-
120
Accuracy: 1.0
Confusion Matrix:
[[10 0 0]
[ 0 9 0]
[ 0 0 11]]
accuracy                           1.00        30
11.SVM
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.datasets import load_iris

# the data-loading and split lines are missing from the listing; the built-in Iris
# data and a 90% test split (135 samples, as in the matrix below) are assumed here
iris = load_iris()
X, y = iris.data, iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.9, random_state=42)

scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

svm_classifier = SVC(kernel='linear')   # kernel choice is an assumption
svm_classifier.fit(X_train, y_train)
y_pred = svm_classifier.predict(X_test)
print("Accuracy:", accuracy_score(y_test, y_pred))
print("Confusion Matrix:")
print(confusion_matrix(y_test, y_pred))
output:-
Accuracy: 0.9777777777777777
Confusion Matrix:
[[48 0 0]
[ 0 41 3]
[ 0 0 43]]
12.classification
import tensorflow as tf
from tensorflow.keras.layers import Flatten, Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# note: flow_from_directory needs extracted folders; these paths point inside a
# .zip archive and must be changed to the unzipped directories before running
train_dir = "C:\\Users\\com13\\Downloads\\archive.zip\\training_set"
validation_dir = "C:\\Users\\com13\\Downloads\\archive.zip\\test_set"
batch_size = 32
img_height = 150
img_width = 150
epochs = 10
train_datagen = ImageDataGenerator(rescale=1. / 255)
validation_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
train_dir,
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='binary'
)
validation_generator = validation_datagen.flow_from_directory(
validation_dir,
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='binary'
)
model = Sequential([
Flatten(input_shape=(img_height, img_width, 3)),
Dense(128, activation='relu'),
Dense(1, activation='sigmoid')
])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(
train_generator,
steps_per_epoch=train_generator.samples // batch_size,
epochs=epochs,
validation_data=validation_generator,
validation_steps=validation_generator.samples // batch_size
)
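A quick way to check the held-out accuracy once training finishes (a usage sketch):

val_loss, val_acc = model.evaluate(validation_generator)
print("validation accuracy:", val_acc)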