CS364 – Modern Artificial Intelligence
ID No. 21DCS066
Practical 1
Date: 26/12/2023
Aim: Write a program for the Water-Jug Problem using a Depth First Search tree and a Breadth First
Search tree in Python.
Code:
from collections import deque

def water_jug_dfs(capacity_jug1, capacity_jug2, target):
    # Depth First Search: explore the state space with a stack (LIFO)
    stack = [(0, 0)]
    visited = set()
    steps = 0
    found = False
    while stack:
        current_state = stack.pop()
        if current_state in visited:
            continue
        visited.add(current_state)
        steps += 1
        print("State:", current_state)
        if target in current_state:
            found = True
            break
        next_states = [
            (capacity_jug1, current_state[1]),  # Fill jug 1
            (current_state[0], capacity_jug2),  # Fill jug 2
            (0, current_state[1]),              # Empty jug 1
            (current_state[0], 0),              # Empty jug 2
            (min(current_state[0] + current_state[1], capacity_jug1),
             max(0, current_state[0] + current_state[1] - capacity_jug1)),  # Pour from jug 2 to jug 1
            (max(0, current_state[0] + current_state[1] - capacity_jug2),
             min(current_state[0] + current_state[1], capacity_jug2))       # Pour from jug 1 to jug 2
        ]
        for state in next_states:
            if state not in visited:
                stack.append(state)
    print("DFS Stats:")
    print("Steps taken:", steps)
    print("States visited:", len(visited))
    return found

def water_jug_bfs(capacity_jug1, capacity_jug2, target):
    # Breadth First Search: explore the state space level by level with a queue (FIFO)
    queue = deque([(0, 0)])
    visited = set()
    steps = 0
    found = False
    while queue:
        current_state = queue.popleft()
        if current_state in visited:
            continue
        visited.add(current_state)
        steps += 1
        print("State:", current_state)
        if target in current_state:
            found = True
            break
        next_states = [
            (capacity_jug1, current_state[1]),  # Fill jug 1
            (current_state[0], capacity_jug2),  # Fill jug 2
            (0, current_state[1]),              # Empty jug 1
            (current_state[0], 0),              # Empty jug 2
            (min(current_state[0] + current_state[1], capacity_jug1),
             max(0, current_state[0] + current_state[1] - capacity_jug1)),  # Pour from jug 2 to jug 1
            (max(0, current_state[0] + current_state[1] - capacity_jug2),
             min(current_state[0] + current_state[1], capacity_jug2))       # Pour from jug 1 to jug 2
        ]
        for state in next_states:
            if state not in visited:
                queue.append(state)
    print("BFS Stats:")
    print("Steps taken:", steps)
    print("States visited:", len(visited))
    return found

capacity_jug1 = 4
capacity_jug2 = 3
target = 2

print("Running DFS:")
water_jug_dfs(capacity_jug1, capacity_jug2, target)
print("Running BFS:")
water_jug_bfs(capacity_jug1, capacity_jug2, target)
Output Screenshot:
Conclusion/Summary: Both DFS and BFS are used to explore the state space of the Water Jug Problem. DFS uses a stack, so it follows one branch deep into the state space before backtracking, while BFS uses a queue and expands all neighboring states before moving deeper. With a visited set, both algorithms find a solution whenever one exists; BFS additionally guarantees that the first solution it reaches uses the fewest fill, empty, and pour operations, whereas DFS may return a longer sequence while typically keeping fewer states in memory on deep search trees. The choice between them therefore depends on whether the shortest solution or lower memory use matters more for the problem at hand.
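Because BFS expands states level by level, it is also the natural choice when the actual shortest sequence of jug states is wanted rather than just a yes/no answer. A minimal, self-contained sketch that records each state's parent so the path can be reconstructed (the function name water_jug_bfs_path is illustrative, not part of the listing above):

from collections import deque

def water_jug_bfs_path(cap1, cap2, target):
    # Track each state's parent so the move sequence can be reconstructed.
    start = (0, 0)
    parent = {start: None}
    queue = deque([start])
    while queue:
        j1, j2 = queue.popleft()
        if target in (j1, j2):
            path, state = [], (j1, j2)
            while state is not None:
                path.append(state)
                state = parent[state]
            return path[::-1]  # shortest sequence of states from the start
        for nxt in [(cap1, j2), (j1, cap2), (0, j2), (j1, 0),
                    (min(j1 + j2, cap1), max(0, j1 + j2 - cap1)),
                    (max(0, j1 + j2 - cap2), min(j1 + j2, cap2))]:
            if nxt not in parent:
                parent[nxt] = (j1, j2)
                queue.append(nxt)
    return None

print(water_jug_bfs_path(4, 3, 2))  # [(0, 0), (0, 3), (3, 0), (3, 3), (4, 2)]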
Practical 2
Date: 2/1/2024
Aim: Write a program to solve the 8-puzzle problem using the A* algorithm in Python.
Code:
import heapq

class PuzzleNode:
    def __init__(self, state, parent=None, move=None):
        self.state = state
        self.parent = parent
        self.move = move
        self.depth = parent.depth + 1 if parent else 0  # g(n): moves made so far
        self.cost = self.depth + self.get_cost()        # f(n) = g(n) + h(n)

    def get_cost(self):
        # h(n): Manhattan distance of every tile from its goal position
        cost = 0
        for i in range(3):
            for j in range(3):
                if self.state[i][j] != 0:
                    x, y = divmod(self.state[i][j] - 1, 3)
                    cost += abs(x - i) + abs(y - j)
        return cost

    def __lt__(self, other):
        # Lets heapq order nodes by their total cost
        return self.cost < other.cost

    def get_neighbours(self):
        neighbours = []
        blank_row, blank_col = self.get_blank_position()
        for move in [(0, -1), (0, 1), (-1, 0), (1, 0)]:
            new_row, new_col = blank_row + move[0], blank_col + move[1]
            if 0 <= new_row < 3 and 0 <= new_col < 3:
                new_state = [list(row) for row in self.state]
                new_state[blank_row][blank_col], new_state[new_row][new_col] = \
                    new_state[new_row][new_col], new_state[blank_row][blank_col]
                neighbours.append(PuzzleNode(new_state, parent=self, move=(new_row, new_col)))
        return neighbours

    def get_blank_position(self):
        for i in range(3):
            for j in range(3):
                if self.state[i][j] == 0:
                    return i, j

def reconstruct_path(node):
    # Walk back through the parents to collect the sequence of moves
    moves = []
    while node.parent is not None:
        moves.append(node.move)
        node = node.parent
    return moves[::-1]

def solve_puzzle(initial_state):
    open_list = [PuzzleNode(initial_state)]
    closed_list = set()
    while open_list:
        node = heapq.heappop(open_list)
        if node.state == final_state:
            return reconstruct_path(node)
        state_key = tuple(tuple(row) for row in node.state)
        if state_key in closed_list:
            continue
        closed_list.add(state_key)
        for neighbour in node.get_neighbours():
            heapq.heappush(open_list, neighbour)
    return None

initial_state = [
    [1, 2, 3],
    [4, 0, 6],
    [7, 5, 8]
]
final_state = [
    [1, 2, 3],
    [4, 5, 6],
    [7, 8, 0]
]

solution = solve_puzzle(initial_state)
if solution:
    print("Solution found! Moves:", solution)
else:
    print("No solution found.")
Output Screenshot:
Conclusion/Summary:
The A* algorithm efficiently solves the 8 puzzle problem by exploring possible moves based on a
heuristic function, in this case, the Manhattan distance. It maintains a priority queue to prioritize states
with lower total cost, which includes the cost of reaching the current state and the heuristic estimate of
reaching the goal state from that state. This implementation demonstrates how A* can find the optimal
solution to the 8 puzzle problem by iteratively exploring the state space until the goal state is reached.
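As a quick check of the heuristic used above: in the initial board only tiles 5 and 8 are out of place, each one step from its goal cell, so the Manhattan distance is 2. The same calculation as a small standalone snippet:

# Manhattan-distance heuristic for a 3x3 board; 0 marks the blank tile.
def manhattan(state):
    dist = 0
    for i in range(3):
        for j in range(3):
            tile = state[i][j]
            if tile != 0:
                goal_row, goal_col = divmod(tile - 1, 3)
                dist += abs(goal_row - i) + abs(goal_col - j)
    return dist

print(manhattan([[1, 2, 3], [4, 0, 6], [7, 5, 8]]))  # 2: tiles 5 and 8 are each one move away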
Practical 3
Date: 6/2/2024
Aim: Write a program for the game Tic-Tac-Toe using the MINIMAX algorithm in Python.
Code:
import sys

# Board symbols (assumption: 'x' is the human, maximizing player; 'o' is the computer)
player = 'x'
opponent = 'o'

def evaluate(board):
    """
    Function to evaluate the current board and return its score.
    """
    # Check rows
    for row in range(3):
        if board[row][0] == board[row][1] == board[row][2]:
            if board[row][0] == player:
                return 10
            elif board[row][0] == opponent:
                return -10
    # Check columns
    for col in range(3):
        if board[0][col] == board[1][col] == board[2][col]:
            if board[0][col] == player:
                return 10
            elif board[0][col] == opponent:
                return -10
    # Check diagonals
    if board[0][0] == board[1][1] == board[2][2]:
        if board[0][0] == player:
            return 10
        elif board[0][0] == opponent:
            return -10
    if board[0][2] == board[1][1] == board[2][0]:
        if board[0][2] == player:
            return 10
        elif board[0][2] == opponent:
            return -10
    # No winner yet
    return 0
def is_moves_left(board):
    """
    Function to check if there are any available moves left.
    """
    for i in range(3):
        for j in range(3):
            if board[i][j] == '_':
                return True
    return False
def minimax(board, depth, is_max):
    """
    Minimax recursion: score the board by trying every possible continuation.
    """
    score = evaluate(board)
    if score == 10:
        return score - depth
    if score == -10:
        return score + depth
    if not is_moves_left(board):
        return 0
    if is_max:
        best = -sys.maxsize
        for i in range(3):
            for j in range(3):
                if board[i][j] == '_':
                    board[i][j] = player
                    best = max(best, minimax(board, depth + 1, not is_max))
                    board[i][j] = '_'
        return best
    else:
        best = sys.maxsize
        for i in range(3):
            for j in range(3):
                if board[i][j] == '_':
                    board[i][j] = opponent
                    best = min(best, minimax(board, depth + 1, not is_max))
                    board[i][j] = '_'
        return best
def find_best_move(board):
    """
    Function to find the best move for the computer using the Minimax algorithm.
    The computer plays the minimizing side, so it picks the move with the lowest value.
    """
    best_val = sys.maxsize
    best_move = (-1, -1)
    for i in range(3):
        for j in range(3):
            if board[i][j] == '_':
                board[i][j] = opponent
                move_val = minimax(board, 0, True)
                board[i][j] = '_'
                if move_val < best_val:
                    best_val = move_val
                    best_move = (i, j)
    return best_move
def print_board(board):
    """
    Function to print the current board state.
    """
    for row in board:
        print(' '.join(row))
    print()
def play_game():
    """
    Function to play the Tic-Tac-Toe game.
    """
    board = [['_', '_', '_'], ['_', '_', '_'], ['_', '_', '_']]
    print("Initial Board:")
    print_board(board)
    while True:
        print("Player's turn:")
        x, y = map(int, input("Enter row and column (0-indexed) for your move: ").split())
        if board[x][y] != '_':
            print("Invalid move! Try again.")
            continue
        board[x][y] = player
        print_board(board)
        if evaluate(board) == 10:
            print("Player wins!")
            break
        elif not is_moves_left(board):
            print("It's a draw!")
            break
        print("Computer's turn:")
        cx, cy = find_best_move(board)
        board[cx][cy] = opponent
        print_board(board)
        if evaluate(board) == -10:
            print("Computer wins!")
            break
        elif not is_moves_left(board):
            print("It's a draw!")
            break

play_game()
Output Screenshot:
Conclusion/Summary:
In conclusion, the implemented Tic-Tac-Toe program showcases the effectiveness of the MINIMAX
algorithm in creating a challenging AI opponent. By recursively evaluating possible moves and their
outcomes, MINIMAX allows the AI player to make strategic decisions that maximize its chances of
winning or minimize the opponent's chances. This algorithm provides a balanced approach, ensuring
that the AI player responds intelligently to different game states. Overall, the program demonstrates
how MINIMAX enhances the gameplay experience by offering engaging and competitive matches
against human players.
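As a quick, illustrative check of the scoring convention in the code above (the board below is hypothetical, not taken from the report): 'x' threatens to complete the top row, so the maximizer's value is close to +10 and the computer's best reply is to block that square.

test_board = [['x', 'x', '_'],
              ['_', 'o', '_'],
              ['_', '_', '_']]
print(minimax(test_board, 0, True))  # 9, i.e. 10 - depth: 'x' can win on its next move
print(find_best_move(test_board))    # (0, 2): the computer blocks the winning square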
Practical 4
Date: 27/2/2024
Aim: Implementation of an Artificial Neural Network for the XOR logic gate with 2-bit binary input
using backpropagation.
Code:
# import Python Libraries
import numpy as np
from matplotlib import pyplot as plt

# Sigmoid Function
def sigmoid(z):
    return 1 / (1 + np.exp(-z))

# Initialize weights and biases for a 2-2-1 network (assumed sizes; not shown in the report)
def initializeParameters(inputFeatures, neuronsInHiddenLayers, outputFeatures):
    W1 = np.random.randn(neuronsInHiddenLayers, inputFeatures)
    b1 = np.zeros((neuronsInHiddenLayers, 1))
    W2 = np.random.randn(outputFeatures, neuronsInHiddenLayers)
    b2 = np.zeros((outputFeatures, 1))
    return {"W1": W1, "b1": b1, "W2": W2, "b2": b2}

# Forward Propagation
def forwardPropagation(X, Y, parameters):
    m = X.shape[1]
    W1 = parameters["W1"]
    W2 = parameters["W2"]
    b1 = parameters["b1"]
    b2 = parameters["b2"]
    Z1 = np.dot(W1, X) + b1
    A1 = sigmoid(Z1)
    Z2 = np.dot(W2, A1) + b2
    A2 = sigmoid(Z2)
    cost = -np.sum(Y * np.log(A2) + (1 - Y) * np.log(1 - A2)) / m  # cross-entropy loss
    cache = (Z1, A1, W1, b1, Z2, A2, W2, b2)
    return cost, cache, A2

# Backward Propagation
def backwardPropagation(X, Y, cache):
    m = X.shape[1]
    (Z1, A1, W1, b1, Z2, A2, W2, b2) = cache
    dZ2 = A2 - Y
    dW2 = np.dot(dZ2, A1.T) / m
    db2 = np.sum(dZ2, axis=1, keepdims=True) / m
    dZ1 = np.dot(W2.T, dZ2) * A1 * (1 - A1)  # sigmoid derivative
    dW1 = np.dot(dZ1, X.T) / m
    db1 = np.sum(dZ1, axis=1, keepdims=True) / m
    return {"dW1": dW1, "db1": db1, "dW2": dW2, "db2": db2}

# Gradient-descent update of the parameters
def updateParameters(parameters, gradients, learningRate):
    for key in ["W1", "b1", "W2", "b2"]:
        parameters[key] = parameters[key] - learningRate * gradients["d" + key]
    return parameters

# XOR training data and hyperparameters (assumed values; not shown in the report)
X = np.array([[0, 0, 1, 1], [0, 1, 0, 1]])
Y = np.array([[0, 1, 1, 0]])
epoch = 100000
learningRate = 0.01
parameters = initializeParameters(2, 2, 1)
losses = np.zeros((epoch, 1))
for i in range(epoch):
    losses[i, 0], cache, A2 = forwardPropagation(X, Y, parameters)
    gradients = backwardPropagation(X, Y, cache)
    parameters = updateParameters(parameters, gradients, learningRate)

# Plot the training loss
plt.plot(losses)
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.show()

# Testing
X = np.array([[1, 1, 0, 0], [0, 1, 0, 1]])  # XOR input
cost, _, A2 = forwardPropagation(X, Y, parameters)
prediction = (A2 > 0.5) * 1.0
# print(A2)
print(prediction)
Output Screenshot:
Conclusion/Summary:
In summary, the implemented Artificial Neural Network for the XOR logic gate effectively demonstrates the application of backpropagation to train the network. Although XOR is not linearly separable, the network learns to approximate the desired outputs through iterative adjustments of its weights and biases. Through backpropagation, the loss decreases over the training epochs and the network gradually converges towards accurate predictions, showing that even a small hidden layer lets it capture the non-linear relationship between inputs and outputs. Overall, the implementation highlights the effectiveness of neural networks on non-trivial tasks like approximating the XOR function.
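One way to see that backpropagation computes the right quantity is to compare an analytic gradient with a finite-difference estimate of the same loss. A minimal sketch, assuming the completed helpers from the listing above (initializeParameters, forwardPropagation, backwardPropagation) are already in scope:

import numpy as np

np.random.seed(0)
X = np.array([[0, 0, 1, 1], [0, 1, 0, 1]])
Y = np.array([[0, 1, 1, 0]])
params = initializeParameters(2, 2, 1)

# Analytic gradient from backpropagation
_, cache, _ = forwardPropagation(X, Y, params)
grads = backwardPropagation(X, Y, cache)

# Centred finite-difference estimate for a single weight, W1[0, 0]
eps = 1e-6
params["W1"][0, 0] += eps
cost_plus, _, _ = forwardPropagation(X, Y, params)
params["W1"][0, 0] -= 2 * eps
cost_minus, _, _ = forwardPropagation(X, Y, params)
params["W1"][0, 0] += eps  # restore the original weight
numeric = (cost_plus - cost_minus) / (2 * eps)

print("analytic:", grads["dW1"][0, 0], "numeric:", numeric)  # the two should agree closely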
Practical 5
Date: 27/2/2024
Aim: Design a Neural Network for the Pima Indians Diabetes dataset using Keras.
Code:
import pandas as pd
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras.layers import Dense
data = pd.read_csv("/pima-indians-diabetes.csv")
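The listing above stops after loading the CSV. A minimal sketch of the kind of network the aim describes follows; the column layout (eight features plus a 0/1 outcome, no header row), the layer sizes, and the training settings are assumptions rather than details taken from the report:

import pandas as pd
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras.layers import Dense

data = pd.read_csv("/pima-indians-diabetes.csv", header=None)  # assumed: no header row
X = data.iloc[:, 0:8].values  # eight clinical features
y = data.iloc[:, 8].values    # outcome: 1 = diabetic, 0 = not diabetic

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

model = keras.Sequential([
    keras.Input(shape=(8,)),
    Dense(12, activation="relu"),
    Dense(8, activation="relu"),
    Dense(1, activation="sigmoid"),  # probability of diabetes
])
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
model.fit(X_train, y_train, epochs=100, batch_size=10, validation_split=0.1, verbose=0)

loss, accuracy = model.evaluate(X_test, y_test, verbose=0)
print("Test accuracy:", round(accuracy, 3))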
Output Screenshot:
Conclusion/Summary:
In this project, we designed and implemented a neural network using Keras to predict diabetes based on
the Pima Indians Diabetes dataset. The model was trained, evaluated, and optimized to achieve
satisfactory performance. By leveraging neural networks, we demonstrated the capability to make
accurate predictions for diabetes diagnosis, contributing to the field of healthcare and disease
management through machine learning techniques.
Practical 6
Date: 27/2/2024
Aim: Implementation of Text Pre-processing using NLTK (Tokenization, Stemming, Lemmatization,
and removal of stop words) in NLP.
Code:
import nltk
nltk.download('all')
# Tokenization using NLTK
from nltk import word_tokenize, sent_tokenize
sent = "This is a great learning platform.\
It is one of the best for Computer Science students."
print(word_tokenize(sent))
print(sent_tokenize(sent))
from nltk.stem import PorterStemmer
# create an object of class PorterStemmer
porter = PorterStemmer()
print(porter.stem("play"))
print(porter.stem("studies"))
print(porter.stem("plays"))
print(porter.stem("played"))
from nltk.stem import WordNetLemmatizer
# create an object of class WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
print(lemmatizer.lemmatize("plays"))
print(lemmatizer.lemmatize("played"))
print(lemmatizer.lemmatize("studies"))
print(lemmatizer.lemmatize("playing"))
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
example_sent = """This is a sample sentence,
showing off the stop words filtration."""
stop_words = set(stopwords.words('english'))
word_tokens = word_tokenize(example_sent)
# converts the words in word_tokens to lower case and then checks whether
#they are present in stop_words or not
filtered_sentence = [w for w in word_tokens if not w.lower() in stop_words]
#with no lower case conversion
filtered_sentence = []
for w in word_tokens:
    if w not in stop_words:
        filtered_sentence.append(w)
print(word_tokens)
print(filtered_sentence)
Conclusion/Summary:
The implementation of text pre-processing using NLTK demonstrates the essential steps in preparing
textual data for natural language processing tasks. Tokenization breaks down the text into individual
words or tokens, making it easier to analyze. Stemming and lemmatization reduce words to their root
forms, aiding in standardization and reducing dimensionality. Removing stop words eliminates
common words that do not contribute much to the overall meaning of the text. Together, these
techniques help improve the efficiency and accuracy of NLP tasks by cleaning and normalizing the
input text data.
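The practical difference between stemming and lemmatization is easiest to see on a single word. A short check using the same NLTK classes as above (the WordNet data must already be downloaded, which nltk.download('all') covers):

from nltk.stem import PorterStemmer, WordNetLemmatizer

# Stemming applies suffix-stripping rules; lemmatization maps to a dictionary form.
print(PorterStemmer().stem("studies"))           # studi
print(WordNetLemmatizer().lemmatize("studies"))  # study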
Practical 7
Date: 27/2/2024
Aim: Perform sentiment analysis of IMDB movie reviews using Python.
Code:
from keras.datasets import imdb
from keras.preprocessing.text import Tokenizer
from keras.utils import pad_sequences
from keras import Sequential
from keras.layers import Dense, SimpleRNN, Embedding, Flatten
from sklearn.metrics import confusion_matrix, classification_report
from matplotlib import pyplot as plt

# Load the IMDB reviews, keeping only the 5000 most frequent words
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=5000)

vocab = imdb.get_word_index()
print("Vocabulary size:", len(vocab))

# Pad / truncate every review to 50 tokens
X_train = pad_sequences(X_train, maxlen=50, padding='post')
X_test = pad_sequences(X_test, maxlen=50, padding='post')

model = Sequential()
model.add(Embedding(5000, 2, input_length=50))  # input dimension matches num_words
model.add(SimpleRNN(100))
model.add(Dense(1, activation='sigmoid'))
model.summary()

# Train (epochs and batch size are assumed; the report does not show them)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(X_train, y_train, epochs=5, batch_size=64,
                    validation_data=(X_test, y_test))

plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epochs')
plt.legend(['train', 'validation'])
plt.savefig("Model_Loss.jpg", dpi=600)
plt.show()

predictions = model.predict(X_test)
print(X_test.shape)
print(predictions)

# Threshold the sigmoid outputs to get 0/1 class labels
y_pred = (predictions > 0.5).astype(int).ravel()
cm = confusion_matrix(y_test, y_pred)
cr = classification_report(y_test, y_pred)
print(cr)

import seaborn as sn
plt.figure(figsize=(7, 5))
sn.heatmap(cm, annot=True, fmt=' ')
plt.xlabel('Predicted')
plt.ylabel('Truth')
plt.show()

# Decode one padded review back to words using the vocabulary index
# (load_data offsets word ids by 3, so this direct lookup is only approximate)
index = 0
print(X_test[index])
print(vocab)
text = []
for i in range(50):
    for key, value in vocab.items():
        if value == X_test[index][i]:
            text.append(key)
print(text)
print("Label: ", y_test[index])
Output Screenshot:
Conclusion/Summary:
To perform sentiment analysis of IMDB movie reviews using Python, we used natural language processing techniques: the reviews come already tokenized and integer-encoded by Keras, are padded to a fixed length, and are fed to a machine learning model. We employed the IMDB dataset of movie reviews labeled with sentiments (positive or negative) and trained a recurrent neural network (an Embedding layer followed by a SimpleRNN and a sigmoid output) to classify the sentiment of each review. After training, we evaluated its performance using accuracy, precision, recall, and F1-score from the classification report, together with the confusion matrix. Finally, the trained model can be applied to new, unseen movie reviews, providing insight into audience reactions and opinions towards films.
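The last step the summary mentions, scoring a brand-new review, is not shown in the listing. A small sketch of how it could be done with the trained model and word index from above (the helper name encode_review and the sample sentence are illustrative):

# Keras' IMDB encoding reserves ids 0-2 (padding, start, unknown), so real words start at index + 3.
def encode_review(text, word_index, maxlen=50, num_words=5000):
    ids = [1]  # 1 marks the start of a sequence in the IMDB encoding
    for word in text.lower().split():
        idx = word_index.get(word)
        ids.append(idx + 3 if idx is not None and idx + 3 < num_words else 2)  # 2 = unknown
    return pad_sequences([ids], maxlen=maxlen, padding='post')

review = "this movie was wonderful and the acting was great"
score = model.predict(encode_review(review, vocab))[0][0]
print("Positive" if score > 0.5 else "Negative", score)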