
1. Write a program to implement Uninformed search techniques:
a. BFS
b. DFS
a. Breadth-First Search (BFS)


Program:
from collections import deque

def bfs(graph, start):
    visited = set()
    queue = deque([start])
    while queue:
        node = queue.popleft()
        if node not in visited:
            print(node, end=" ")
            visited.add(node)
            for neighbor in graph[node]:
                if neighbor not in visited:
                    queue.append(neighbor)

if __name__ == "__main__":
    # Dynamic input
    graph = {}
    num_edges = int(input("Enter number of edges: "))
    for _ in range(num_edges):
        u, v = input("Enter edge (u v): ").split()
        if u not in graph:
            graph[u] = []
        if v not in graph:
            graph[v] = []
        graph[u].append(v)
        graph[v].append(u)  # If the graph is undirected
    start_node = input("Enter the start node: ")
    print("BFS traversal: ")
    bfs(graph, start_node)
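
As a quick usage check (a hypothetical session, shown only to illustrate the input format), entering three edges and starting from A would produce:

Enter number of edges: 3
Enter edge (u v): A B
Enter edge (u v): A C
Enter edge (u v): B D
Enter the start node: A
BFS traversal:
A B C D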
Output:
b. Depth-First Search (DFS)
Program:
def dfs(graph, start, visited=None):
    if visited is None:
        visited = set()
    visited.add(start)
    print(start, end=" ")
    for neighbor in graph[start]:
        if neighbor not in visited:
            dfs(graph, neighbor, visited)

if __name__ == "__main__":
    # Dynamic input
    graph = {}
    num_edges = int(input("Enter number of edges: "))
    for _ in range(num_edges):
        u, v = input("Enter edge (u v): ").split()
        if u not in graph:
            graph[u] = []
        if v not in graph:
            graph[v] = []
        graph[u].append(v)
        graph[v].append(u)  # If the graph is undirected
    start_node = input("Enter the start node: ")
    print("DFS traversal: ")
    dfs(graph, start_node)
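
The recursive version above can hit Python's default recursion limit on very deep graphs. A minimal iterative sketch using an explicit stack (an optional alternative, not part of the original listing; the visiting order may differ slightly from the recursive version) is:

def dfs_iterative(graph, start):
    # Explicit stack replaces the call stack of the recursive version
    visited = set()
    stack = [start]
    while stack:
        node = stack.pop()
        if node not in visited:
            print(node, end=" ")
            visited.add(node)
            # Push neighbours in reverse so the first-listed neighbour is explored first
            for neighbor in reversed(graph[node]):
                if neighbor not in visited:
                    stack.append(neighbor)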

Output:
2. Write a program to implement Informed search techniques
a. Greedy Best first search
b. A* algorithm

a. Greedy Best first search


Program:
import heapq

def greedy_best_first_search(graph, start, goal, heuristic):
    open_list = []
    heapq.heappush(open_list, (heuristic[start], start))
    closed_list = set()
    came_from = {start: None}
    while open_list:
        _, current = heapq.heappop(open_list)
        if current == goal:
            path = []
            while current:
                path.append(current)
                current = came_from[current]
            return path[::-1]
        closed_list.add(current)
        for neighbor in graph[current]:
            if neighbor in closed_list:
                continue
            if neighbor not in [i[1] for i in open_list]:
                came_from[neighbor] = current
                heapq.heappush(open_list, (heuristic[neighbor], neighbor))
    return None

if __name__ == "__main__":
    graph = {}
    heuristic = {}
    num_edges = int(input("Enter number of edges: "))
    for _ in range(num_edges):
        u, v = input("Enter edge (u v): ").split()
        if u not in graph:
            graph[u] = []
        if v not in graph:
            graph[v] = []
        graph[u].append(v)
        graph[v].append(u)  # If the graph is undirected
    num_nodes = int(input("Enter number of nodes for heuristic values: "))
    for _ in range(num_nodes):
        node, h = input("Enter node and heuristic value (node h): ").split()
        heuristic[node] = int(h)
    start_node = input("Enter the start node: ")
    goal_node = input("Enter the goal node: ")
    path = greedy_best_first_search(graph, start_node, goal_node, heuristic)
    if path:
        print("Path found:", " -> ".join(path))
    else:
        print("No path found.")

Output:
b. A* algorithm
Program:
import heapq

def a_star_search(graph, start, goal, heuristic):
    open_list = []
    heapq.heappush(open_list, (0 + heuristic[start], 0, start))
    closed_list = set()
    came_from = {start: None}
    g_score = {start: 0}
    while open_list:
        _, current_g, current = heapq.heappop(open_list)
        if current == goal:
            path = []
            while current:
                path.append(current)
                current = came_from[current]
            return path[::-1]
        closed_list.add(current)
        for neighbor in graph[current]:
            tentative_g_score = current_g + 1  # Assuming uniform cost for simplicity
            if neighbor in closed_list and tentative_g_score >= g_score.get(neighbor, float('inf')):
                continue
            if tentative_g_score < g_score.get(neighbor, float('inf')) or neighbor not in [i[2] for i in open_list]:
                came_from[neighbor] = current
                g_score[neighbor] = tentative_g_score
                f_score = tentative_g_score + heuristic[neighbor]
                heapq.heappush(open_list, (f_score, tentative_g_score, neighbor))
    return None

if __name__ == "__main__":
    graph = {}
    heuristic = {}
    num_edges = int(input("Enter number of edges: "))
    for _ in range(num_edges):
        u, v = input("Enter edge (u v): ").split()
        if u not in graph:
            graph[u] = []
        if v not in graph:
            graph[v] = []
        graph[u].append(v)
        graph[v].append(u)  # If the graph is undirected
    num_nodes = int(input("Enter number of nodes for heuristic values: "))
    for _ in range(num_nodes):
        node, h = input("Enter node and heuristic value (node h): ").split()
        heuristic[node] = int(h)
    start_node = input("Enter the start node: ")
    goal_node = input("Enter the goal node: ")
    path = a_star_search(graph, start_node, goal_node, heuristic)
    if path:
        print("Path found:", " -> ".join(path))
    else:
        print("No path found.")

Output:
3. Study of Prolog, its facts, and rules.
a. Write simple facts for the statements and querying it.
b. Write a program for Family-tree.
Introduction to Prolog:
Prolog (Programming in Logic) is a high-level programming language based on formal logic. It is widely used in
artificial intelligence and computational linguistics. A Prolog program consists of facts, rules, and queries.
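
To try the examples below, a typical workflow (a sketch assuming SWI-Prolog; the file name is illustrative) is to save the facts and rules in a file, load it from the interactive prompt, and then pose queries:

% Save the clauses in a file such as family.pl, then start SWI-Prolog with: swipl
?- consult('family.pl').   % load the knowledge base
?- parent(Who, jenny).     % pose a query; press ; for more answers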

a. Simple Facts and Querying:


Facts
Facts represent basic assertions about some world. For example:

% Facts about animals

cat(tom).

dog(bobby).

bird(tweety).

% Facts about ownership

owns(john, tom).

owns(sarah, bobby).

Queries:
Queries are used to retrieve information from the knowledge base.

% Query to check if 'tom' is a cat

?- cat(tom).

% Expected output: true

% Query to find out who owns 'tom'

?- owns(Who, tom).

% Expected output: Who = john.


b. Family-Tree Program
Facts
% Facts: parent relationships

parent(john, mary).

parent(john, paul).

parent(susan, mary).

parent(susan, paul).

parent(mary, jenny).

parent(mary, jack).

parent(mike, jenny).

parent(mike, jack).

% Facts: gender

male(john).

male(paul).

male(jack).

male(mike).

female(susan).

female(mary).

female(jenny).

Rules:
% Rule: sibling

sibling(X, Y) :- parent(Z, X), parent(Z, Y), X \= Y.

% Rule: mother

mother(X, Y) :- parent(X, Y), female(X).

% Rule: father

father(X, Y) :- parent(X, Y), male(X).

% Rule: grandparent

grandparent(X, Y) :- parent(X, Z), parent(Z, Y).

% Rule: grandchild

grandchild(X, Y) :- grandparent(Y, X).

% Rule: ancestor

ancestor(X, Y) :- parent(X, Y).

ancestor(X, Y) :- parent(X, Z), ancestor(Z, Y).


Queries:
% Query to find out who are the parents of 'jenny'

?- parent(Who, jenny).

% Expected output: Who = mary ; Who = mike.

% Query to check if 'paul' and 'mary' are siblings

?- sibling(paul, mary).

% Expected output: true

% Query to find out the grandmother of 'jenny'

?- mother(Who, Parent), parent(Parent, jenny).

% Expected output: Who = susan, Parent = mary.

% Query to find out all ancestors of 'jack'

?- ancestor(Who, jack).

% Expected output: Who = mary ; Who = mike ; Who = john ; Who = susan.
4. Write a program to train and validate the following classifiers for given data (scikit-learn):
a. Decision Tree
b. Multi-layer Feed Forward neural network

a. Decision Tree
Program:
import numpy as np
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score, classification_report

# Load dataset
iris = load_iris()
X = iris.data
y = iris.target

# Split data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# Create and train Decision Tree classifier
clf = DecisionTreeClassifier(random_state=42)
clf.fit(X_train, y_train)

# Predict on test set
y_pred = clf.predict(X_test)

# Evaluate the classifier
accuracy = accuracy_score(y_test, y_pred)
report = classification_report(y_test, y_pred)
print(f"Decision Tree Classifier Accuracy: {accuracy}")
print("Classification Report:")
print(report)
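
Since the exercise asks to train and validate the classifier, a minimal k-fold cross-validation sketch (an optional addition, reusing the iris data already loaded above) is:

from sklearn.model_selection import cross_val_score

# 5-fold cross-validation on the full dataset
cv_scores = cross_val_score(DecisionTreeClassifier(random_state=42), X, y, cv=5)
print("5-fold CV accuracy (mean):", cv_scores.mean())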
Output:
b. Multi-layer Feed Forward neural network
Program:
from sklearn.neural_network import MLPClassifier

# Create and train MLP classifier (uses X_train, X_test, y_train, y_test from part a)
mlp = MLPClassifier(hidden_layer_sizes=(100,), max_iter=300, random_state=42)
mlp.fit(X_train, y_train)

# Predict on test set
y_pred_mlp = mlp.predict(X_test)

# Evaluate the classifier
accuracy_mlp = accuracy_score(y_test, y_pred_mlp)
report_mlp = classification_report(y_test, y_pred_mlp)
print(f"MLP Classifier Accuracy: {accuracy_mlp}")
print("Classification Report:")
print(report_mlp)
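
MLPs are sensitive to feature scale, so a hedged refinement (optional, not part of the original listing) is to standardize the inputs with a pipeline before training:

from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

# Scale features, then train the same network; scaling is re-applied automatically at predict time
mlp_scaled = make_pipeline(StandardScaler(),
                           MLPClassifier(hidden_layer_sizes=(100,), max_iter=300, random_state=42))
mlp_scaled.fit(X_train, y_train)
print("Scaled MLP accuracy:", mlp_scaled.score(X_test, y_test))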

Output:
5. Text processing using NLTK
a. Remove stop words
b. implement stemming
c. POS (Parts of Speech) tagging

a. Remove stop words


Program:
# Sample text used for this part
text = "This is a simple example to demonstrate how to remove stop words from a given text."

# Tokenize the text (imports and NLTK downloads are shown in the Complete Code at the end of this exercise)
words = word_tokenize(text)

# Get English stop words
stop_words = set(stopwords.words('english'))

# Remove stop words
filtered_words = [word for word in words if word.lower() not in stop_words]

print("Original Words:", words)
print("Filtered Words:", filtered_words)

Output:
Original Words: ['This', 'is', 'a', 'simple', 'example', 'to', 'demonstrate', 'how', 'to', 'remove', 'stop', 'words', 'from', 'a',
'given', 'text', '.']

Filtered Words: ['This', 'simple', 'example', 'demonstrate', 'remove', 'stop', 'words', 'given', 'text', '.']

b. implement stemming
Program:
# Initialize the PorterStemmer

stemmer = PorterStemmer()

# Stem the filtered words

stemmed_words = [stemmer.stem(word) for word in filtered_words]

print("Stemmed Words:", stemmed_words)

Output:
Filtered Words: ['This', 'simple', 'example', 'demonstrate', 'remove', 'stop', 'words', 'given', 'text', '.']

Stemmed Words: ['thi', 'simpl', 'exampl', 'demonstr', 'remov', 'stop', 'word', 'given', 'text', '.']
c. POS (Parts of Speech) tagging
Program:
# POS tagging for the original words

pos_tags = pos_tag(words)

print("POS Tags:", pos_tags)

Output:
Original Words: ['This', 'is', 'a', 'simple', 'example', 'to', 'demonstrate', 'how', 'to', 'remove', 'stop', 'words', 'from', 'a',
'given', 'text', '.']

Filtered Words: ['This', 'simple', 'example', 'demonstrate', 'remove', 'stop', 'words', 'given', 'text', '.']

Stemmed Words: ['thi', 'simpl', 'exampl', 'demonstr', 'remov', 'stop', 'word', 'given', 'text', '.']

POS Tags: [('This', 'DT'), ('is', 'VBZ'), ('a', 'DT'), ('simple', 'JJ'), ('example', 'NN'), ('to', 'TO'), ('demonstrate', 'VB'), ('how',
'WRB'), ('to', 'TO'), ('remove', 'VB'), ('stop', 'VB'), ('words', 'NNS'), ('from', 'IN'), ('a', 'DT'), ('given', 'VBN'), ('text', 'NN'),
('.', '.')]

Complete Code:
import nltk

from nltk.corpus import stopwords

from nltk.tokenize import word_tokenize

from nltk.stem import PorterStemmer

from nltk.tag import pos_tag

# Download necessary datasets

nltk.download('punkt')

nltk.download('stopwords')

nltk.download('averaged_perceptron_tagger')

# Sample text

text = "NLTK is a leading pla orm for building Python programs to work with human language data."

# Task 1: Remove Stop Words

# Tokenize the text

words = word_tokenize(text)

# Get English stop words

stop_words = set(stopwords.words('english'))
# Remove stop words

filtered_words = [word for word in words if word.lower() not in stop_words]

print("Original Words:", words)

print("Filtered Words:", filtered_words)

# Task 2: Implement Stemming

# Initialize the PorterStemmer

stemmer = PorterStemmer()

# Stem the filtered words

stemmed_words = [stemmer.stem(word) for word in filtered_words]

print("Stemmed Words:", stemmed_words)

# Task 3: POS (Parts of Speech) Tagging

# POS tagging for the original words

pos_tags = pos_tag(words)

print("POS Tags:", pos_tags)

Output:
Original Words: ['NLTK', 'is', 'a', 'leading', 'platform', 'for', 'building', 'Python', 'programs', 'to', 'work', 'with', 'human',
'language', 'data', '.']

Filtered Words: ['NLTK', 'leading', 'platform', 'building', 'Python', 'programs', 'work', 'human', 'language', 'data', '.']

Stemmed Words: ['nltk', 'lead', 'platform', 'build', 'python', 'program', 'work', 'human', 'languag', 'data', '.']

POS Tags: [('NLTK', 'NNP'), ('is', 'VBZ'), ('a', 'DT'), ('leading', 'VBG'), ('platform', 'NN'), ('for', 'IN'), ('building', 'VBG'),
('Python', 'NNP'), ('programs', 'NNS'), ('to', 'TO'), ('work', 'VB'), ('with', 'IN'), ('human', 'JJ'), ('language', 'NN'), ('data',
'NNS'), ('.', '.')]
6. In addition to the above programs, students should be encouraged to study implementations of one of the following:

• Game bot (Tic Tac Toe, 7 puzzle)
• Expert system (Simple Medical Diagnosis)
• Text classification
• Chat bot

Game bot (Tic Tac Toe, 7 puzzle):


Program:
import random

# Function to print the board
def print_board(board):
    for row in board:
        print(" | ".join(row))
        print("-" * 5)

# Function to check for a win or tie
def check_winner(board):
    # Check rows, columns and diagonals
    for row in board:
        if row[0] == row[1] == row[2] != ' ':
            return row[0]
    for col in range(3):
        if board[0][col] == board[1][col] == board[2][col] != ' ':
            return board[0][col]
    if board[0][0] == board[1][1] == board[2][2] != ' ':
        return board[0][0]
    if board[0][2] == board[1][1] == board[2][0] != ' ':
        return board[0][2]
    # Check for a tie
    if all(cell != ' ' for row in board for cell in row):
        return 'Tie'
    return None

# Function for player's move
def player_move(board):
    while True:
        row = int(input("Enter the row (0, 1, or 2): "))
        col = int(input("Enter the column (0, 1, or 2): "))
        if board[row][col] == ' ':
            board[row][col] = 'X'
            break
        else:
            print("This cell is already occupied. Try again.")

# Function for bot's move
def bot_move(board):
    # Simple strategy: choose a random empty cell
    empty_cells = [(r, c) for r in range(3) for c in range(3) if board[r][c] == ' ']
    row, col = random.choice(empty_cells)
    board[row][col] = 'O'

# Main game function
def tic_tac_toe():
    board = [[' ' for _ in range(3)] for _ in range(3)]
    print("Welcome to Tic-Tac-Toe!")
    print_board(board)
    while True:
        # Player's move
        player_move(board)
        print_board(board)
        winner = check_winner(board)
        if winner:
            print(f"{winner} wins!" if winner != 'Tie' else "It's a tie!")
            break
        # Bot's move
        bot_move(board)
        print_board(board)
        winner = check_winner(board)
        if winner:
            print(f"{winner} wins!" if winner != 'Tie' else "It's a tie!")
            break

if __name__ == "__main__":
    tic_tac_toe()

Output:
Expert system (Simple Medical Diagnosis):
Program:
class ExpertSystem:
    def __init__(self):
        self.symptoms = []
        self.conditions = {
            "headache": ["migraine", "tension headache"],
            "cough": ["cold", "flu"],
            "fever": ["flu", "infection"],
            "stomach pain": ["food poisoning", "stomach flu"]
        }

    def ask_questions(self):
        print("Welcome to the Medical Diagnosis Expert System!")
        print("Please answer the following questions (yes or no):")
        for symptom in self.conditions.keys():
            answer = input(f"Do you have {symptom}? ").lower()
            if answer == 'yes':
                self.symptoms.append(symptom)

    def diagnose(self):
        print("\nBased on your symptoms, the possible medical conditions could be:")
        for symptom in self.symptoms:
            for condition in self.conditions[symptom]:
                print(f"- {condition.capitalize()}")
        print("Please consult a medical professional for a proper diagnosis and treatment.")

if __name__ == "__main__":
    expert_system = ExpertSystem()
    expert_system.ask_questions()
    expert_system.diagnose()
Output:
Text classification:
Program:
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, classification_report

# Sample data
texts = ["buy now, limited time offer!", "hey, how are you?", "get free samples today!", "check out our new products"]
labels = ["spam", "ham", "spam", "ham"]

# Split data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(texts, labels, test_size=0.2, random_state=42)

# Feature extraction
vectorizer = CountVectorizer()
X_train_counts = vectorizer.fit_transform(X_train)
X_test_counts = vectorizer.transform(X_test)

# Train Naive Bayes classifier
clf = MultinomialNB()
clf.fit(X_train_counts, y_train)

# Predict on test set
y_pred = clf.predict(X_test_counts)

# Evaluate classifier
accuracy = accuracy_score(y_test, y_pred)
report = classification_report(y_test, y_pred)
print("Accuracy:", accuracy)
print("Classification Report:")
print(report)
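
With only four sample documents, the split leaves a single test message, so the reported metrics are purely illustrative. A hedged variant that swaps in TF-IDF weighting (via scikit-learn's TfidfVectorizer) changes only the feature-extraction step:

from sklearn.feature_extraction.text import TfidfVectorizer

# TF-IDF weighting instead of raw counts (illustrative alternative)
tfidf = TfidfVectorizer()
X_train_tfidf = tfidf.fit_transform(X_train)
X_test_tfidf = tfidf.transform(X_test)
clf_tfidf = MultinomialNB().fit(X_train_tfidf, y_train)
print("TF-IDF predictions:", clf_tfidf.predict(X_test_tfidf))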
Output:
Chat bot:
class Chatbot:
    def __init__(self):
        self.responses = {
            "hi": "Hello!",
            "how are you?": "I'm good, thank you!",
            "what is your name?": "I'm a chatbot. What can I do for you?",
            "bye": "Goodbye! Have a nice day!",
            "default": "I'm sorry, I didn't understand that. Can you please repeat?"
        }

    def get_response(self, user_input):
        user_input = user_input.lower()
        response = self.responses.get(user_input, self.responses["default"])
        return response

if __name__ == "__main__":
    chatbot = Chatbot()
    print("Welcome to the Chatbot!")
    print("You can start chatting. Type 'bye' to exit.")
    while True:
        user_input = input("You: ")
        if user_input.lower() == 'bye':
            print(chatbot.get_response(user_input))
            break
        else:
            print("Chatbot:", chatbot.get_response(user_input))

Output:
