
EX.NO:1 IMPLEMENTATION OF UNINFORMED SEARCH ALGORITHMS (BFS, DFS)

PROGRAM:

graph = {
    '5': ['3', '7'],
    '3': ['2', '4'],
    '7': ['8'],
    '2': [],
    '4': ['8'],
    '8': []
}

visited = []  # nodes already discovered
queue = []    # FIFO queue of nodes waiting to be expanded

def bfs(visited, graph, node):
    visited.append(node)
    queue.append(node)
    while queue:
        m = queue.pop(0)  # dequeue the oldest node
        print(m, end=" ")
        for neighbour in graph[m]:
            if neighbour not in visited:
                visited.append(neighbour)
                queue.append(neighbour)

print("Following is the breadth-first search")
bfs(visited, graph, '5')
OUTPUT:

Following is the breadth-first search
5 3 7 2 4 8
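
Note: list.pop(0) is O(n) because every remaining element shifts left on each dequeue. A minimal sketch of the same traversal using collections.deque (whose popleft() is O(1)) is shown below; it assumes the graph dictionary defined above.

from collections import deque

def bfs_deque(graph, start):
    # Same visiting order as bfs() above, but with an O(1) dequeue.
    visited = {start}
    queue = deque([start])
    while queue:
        m = queue.popleft()
        print(m, end=" ")
        for neighbour in graph[m]:
            if neighbour not in visited:
                visited.add(neighbour)
                queue.append(neighbour)

bfs_deque(graph, '5')  # prints: 5 3 7 2 4 8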

PROGRAM: DFS

graph = {
    '5': ['3', '7'],
    '3': ['2', '4'],
    '7': ['8'],
    '2': [],
    '4': ['8'],
    '8': []
}

visited = set()  # nodes already visited

def dfs(visited, graph, node):
    if node not in visited:
        print(node)
        visited.add(node)
        for neighbour in graph[node]:
            dfs(visited, graph, neighbour)

print("Following is the depth-first search")
dfs(visited, graph, '5')

OUTPUT:

Following is the depth-first search
5
3
2
4
8
7
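
The recursive version above can hit Python's default recursion limit (roughly 1000 frames) on deep graphs. A minimal iterative sketch using an explicit stack is shown below, assuming the same graph dictionary; neighbours are pushed in reverse so the visiting order matches the recursive version.

def dfs_iterative(graph, start):
    # Explicit-stack DFS; reversed() preserves the recursive visiting order.
    visited = set()
    stack = [start]
    while stack:
        node = stack.pop()
        if node not in visited:
            print(node)
            visited.add(node)
            stack.extend(reversed(graph[node]))

dfs_iterative(graph, '5')  # prints: 5 3 2 4 8 7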

EX.NO:2 IMPLEMENTATION OF INFORMED SEARCH ALGORITHM (A*)

PROGRAM:

import heapq

def a_star(graph, start, dest, heuristic):
    # distances stores f = g + h for each relaxed node
    distances = {vertex: float('inf') for vertex in graph}
    distances[start] = 0
    parent = {vertex: None for vertex in graph}
    visited = set()
    # Priority-queue entries are (f = g + h, g, vertex)
    pq = [(0 + heuristic[start], 0, start)]
    while pq:
        curr_f, curr_dist, curr_vert = heapq.heappop(pq)
        if curr_vert not in visited:
            visited.add(curr_vert)
            for nbor, weight in graph[curr_vert].items():
                distance = curr_dist + weight            # g(nbor)
                f_distance = distance + heuristic[nbor]  # f = g + h
                if f_distance < distances[nbor]:
                    distances[nbor] = f_distance
                    parent[nbor] = curr_vert
                    if nbor == dest:
                        return distances, parent
                    heapq.heappush(pq, (f_distance, distance, nbor))
    return distances, parent

def generate_path_from_parents(parent, start, dest):
    # Walk parent pointers back from dest to start, then reverse
    path = []
    curr = dest
    while curr:
        path.append(curr)
        curr = parent[curr]
    return '->'.join(path[::-1])

graph = {
    'A': {'B': 5, 'C': 5},
    'B': {'A': 5, 'C': 4, 'D': 3},
    'C': {'A': 5, 'B': 4, 'D': 7, 'E': 7, 'H': 8},
    'D': {'B': 3, 'C': 7, 'H': 11, 'K': 16, 'L': 13, 'M': 14},
    'E': {'C': 7, 'F': 4, 'H': 5},
    'F': {'E': 4, 'G': 9},
    'G': {'F': 9, 'N': 12},
    'H': {'E': 5, 'C': 8, 'D': 11, 'I': 3},
    'I': {'H': 3, 'J': 4},
    'J': {'I': 4, 'N': 3},
    'K': {'D': 16, 'L': 5, 'P': 4, 'N': 7},
    'L': {'D': 13, 'M': 9, 'O': 4, 'K': 5},
    'M': {'D': 14, 'L': 9, 'O': 5},
    'N': {'G': 12, 'J': 3, 'P': 7},
    'O': {'M': 5, 'L': 4},
    'P': {'K': 4, 'J': 8, 'N': 7}
}

heuristic = {
    'A': 16, 'B': 17, 'C': 13, 'D': 16,
    'E': 16, 'F': 20, 'G': 17, 'H': 11,
    'I': 10, 'J': 8, 'K': 4, 'L': 7,
    'M': 10, 'N': 7, 'O': 5, 'P': 0
}

start = 'A'
dest = 'P'
distances, parent = a_star(graph, start, dest, heuristic)
print('distances=>', distances)
print('parent=>', parent)
print('Optimal path=>', generate_path_from_parents(parent, start, dest))

OUTPUT:
distances=> {'A': 0, 'B': 22, 'C': 18, 'D': 24, 'E': 28, 'F': 36, 'G': inf, 'H': 24, 'I': 26, 'J': 28, 'K': 28,
'L': 28, 'M': 32, 'N': 30, 'O': 30, 'P': 28}

parent=> {'A': None, 'B': 'A', 'C': 'A', 'D': 'B', 'E': 'C', 'F': 'E', 'G': None, 'H': 'C', 'I': 'H', 'J': 'I',
'K': 'D', 'L': 'D', 'M': 'D', 'N': 'J', 'O': 'L', 'P': 'K'}

Optimal path=> A->B->D->K->P
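
As a quick sanity check, the cost of the reported path can be recomputed by summing the edge weights along it. The sketch below assumes the graph dictionary defined above and the path printed in the output.

path = ['A', 'B', 'D', 'K', 'P']
# Sum consecutive edge weights: 5 + 3 + 16 + 4 = 28,
# which matches distances['P'] (f = g + h, and h('P') = 0).
cost = sum(graph[u][v] for u, v in zip(path, path[1:]))
print('path cost =>', cost)  # 28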

EX.NO:3 NAIVE BAYES MODEL

PROGRAM:

import numpy as np
from sklearn.naive_bayes import GaussianNB, MultinomialNB
from sklearn.feature_extraction.text import TfidfVectorizer

def naive_bayes(X_train, y_train, X_test, text=False):
    """
    Implements a Naive Bayes model using GaussianNB or MultinomialNB
    depending on the data type (numerical or text).

    Args:
        X_train (numpy.ndarray): Training data features.
        y_train (numpy.ndarray): Training data labels.
        X_test (numpy.ndarray): Test data features.
        text (bool, optional): Flag indicating text data. Defaults to False.

    Returns:
        tuple: A tuple containing the trained model and predicted labels.
    """
    if text:
        # Convert raw text to TF-IDF feature vectors
        vectorizer = TfidfVectorizer()
        X_train = vectorizer.fit_transform(X_train)
        X_test = vectorizer.transform(X_test)
        # Use MultinomialNB for text classification
        model = MultinomialNB()
    else:
        # Use GaussianNB for numerical features
        model = GaussianNB()
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    return model, y_pred

# Sample usage (assuming numerical features)
X_train = np.array([[3, 5], [1, 1], [2, 8], [4, 8], [5, 1]])
y_train = np.array([1, 0, 1, 0, 1])
X_test = np.array([[3, 4], [0, 0], [5, 5]])
model, y_pred = naive_bayes(X_train, y_train, X_test)
print("Predicted labels:", y_pred)

# Example usage for text data (replace with your text data)
text_train = ["This is a spam email", "I like this product", "This movie is not good"]
text_test = ["Very urgent! Click this link", "Recommend a good movie"]
y_train = [1, 0, 0]  # 1 for spam, 0 for not spam
model, y_pred = naive_bayes(text_train, y_train, text_test, text=True)
print("Predicted labels for text data:", y_pred)


OUTPUT:

Predicted labels: [1 0 1]

Predicted labels for text data: [0 0]
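
Predictions alone do not show how well the model fits. A minimal evaluation sketch is shown below; it assumes the naive_bayes function defined above, restates the numerical example so it stands alone, and uses hypothetical ground-truth labels y_true for the test points.

from sklearn.metrics import accuracy_score
import numpy as np

# Restating the numerical example so the snippet stands alone.
X_train = np.array([[3, 5], [1, 1], [2, 8], [4, 8], [5, 1]])
y_train = np.array([1, 0, 1, 0, 1])
X_test = np.array([[3, 4], [0, 0], [5, 5]])
y_true = np.array([1, 0, 1])  # hypothetical ground-truth labels, for illustration only

model, y_pred = naive_bayes(X_train, y_train, X_test)
print("Accuracy:", accuracy_score(y_true, y_pred))  # 1.0 if predictions match y_true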
