AI Exp 1-10
TABLE DRIVEN AGENT
Exp. No.: 01 Date:
SOURCE CODE:
loc_A, loc_B = 'A', 'B'          # the two vacuum-world locations
percepts = []                    # percept sequence seen so far
table = {                        # (partial) table mapping percept sequences to actions
    ((loc_A, 'Dirty'),): 'Suck',
    ((loc_A, 'Dirty'), (loc_A, 'Clean')): 'Right',
}

def LOOKUP(percepts, table):
    # Look up the action indexed by the whole percept sequence
    action = table.get(tuple(percepts))
    return action

def TABLE_DRIVEN_AGENT(percept):
    percepts.append(percept)
    action = LOOKUP(percepts, table)
    return action

def run():
    print('Action\tPercepts')
    print(TABLE_DRIVEN_AGENT((loc_A, 'Dirty')), '\t', percepts)
    print(TABLE_DRIVEN_AGENT((loc_A, 'Clean')), '\t', percepts)
    print(TABLE_DRIVEN_AGENT((loc_B, 'Clean')), '\t', percepts)

run()
OUTPUT:
Action Percepts
Suck [('A', 'Dirty')]
Right [('A', 'Dirty'), ('A', 'Clean')]
None [('A', 'Dirty'), ('A', 'Clean'), ('B', 'Clean')]
SIMPLE REFLEX AGENT
Exp. No.: 02 - I Date:
SOURCE CODE:
# Define the agent function: choose an action from the current percept only
def reflex_vacuum_agent(percept):
    location, status = percept
    if status == 'dirty':
        return 'suck'
    return 'right'

# Initialise the two-location environment, the agent's location and its score
environment = {'A': 'dirty', 'B': 'dirty'}
location = 'A'
score = 0

while True:
    action = reflex_vacuum_agent((location, environment[location]))
    # Execute the chosen action and update the environment and performance score
    if action == 'suck':
        environment[location] = 'clean'
        score += 1
    elif action == 'right':
        if location == 'A':
            location = 'B'
        else:
            location = 'A'
        score -= 1
    # Print the current state of the environment and the agent's performance score
    print("Location: {}, Environment: {}, Action: {}, Score: {}".format(
        location, environment, action, score))
    # Check if the environment is clean
    if all(status == 'clean' for status in environment.values()):
        print("Environment is clean")
        break
OUTPUT:
Location: A, Environment: {'A': 'clean', 'B': 'dirty'}, Action: suck, Score: 1
Location: B, Environment: {'A': 'clean', 'B': 'dirty'}, Action: right, Score: 0
Location: B, Environment: {'A': 'clean', 'B': 'clean'}, Action: suck, Score: 1
Environment is clean
MODEL BASED REFLEX AGENT
Exp. No.: 02 - II Date:
SOURCE CODE:
def agent(location, status):
    if status == "Dirty":
        return "Suck"
    elif location == 'A':
        return "Right"
    elif location == 'B':
        return "Left"

def vacuum_cleaner():
    location = 'A'
    environment = {'A': 'Dirty', 'B': 'Dirty'}
    score = 0
    while True:
        # Stop once the tracked model of the environment is completely clean
        if all(status == "Clean" for status in environment.values()):
            print("All is clean.")
            print("Environment is clean with score:", score)
            break
        print("Location:", location)
        print("Environment:", environment)
        print("Score:", score)
        # Choose an action from the tracked state, then carry it out
        action = agent(location, environment[location])
        print("Action:", action)
        if action == "Suck":
            environment[location] = "Clean"
            score += 1
        elif action == "Right":
            location = 'B'
            score -= 1
        elif action == "Left":
            location = 'A'
            score -= 1

vacuum_cleaner()
OUTPUT:
Location: A
Environment: {'A': 'Dirty', 'B': 'Dirty'}
Score: 0
Action: Suck
Location: A
Environment: {'A': 'Clean', 'B': 'Dirty'}
Score: 1
Action: Right
Location: B
Environment: {'A': 'Clean', 'B': 'Dirty'}
Score: 0
Action: Suck
All is clean.
Environment is clean with score: 1
TRAVELLING SALESMAN PROBLEM
Exp. No.: 03 Date:
SOURCE CODE:
import random

def calculate_distance(path, distances):
    # Total length of the closed tour described by path
    total = 0
    for i in range(len(path)):
        total += distances[path[i]][path[(i + 1) % len(path)]]
    return total

def hill_climbing(distances):
    num_cities = len(distances)
    current_path = list(range(num_cities))
    random.shuffle(current_path)
    current_distance = calculate_distance(current_path, distances)
    while True:
        found_better_path = False
        for i in range(num_cities):
            for j in range(i + 1, num_cities):
                if j == (i + 1) % num_cities:
                    continue
                # Swap two cities and keep the new tour if it is shorter
                new_path = current_path[:]
                new_path[i], new_path[j] = new_path[j], new_path[i]
                new_distance = calculate_distance(new_path, distances)
                if new_distance < current_distance:
                    current_path = new_path
                    current_distance = new_distance
                    found_better_path = True
                    break
            if found_better_path:
                break
        if not found_better_path:
            break
    return current_path, current_distance
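The distance matrix and driver that produced the output below are not part of the listing; a minimal driver with an illustrative symmetric 4-city matrix (assumed, not the original data) could look like this:

# Illustrative 4-city symmetric distance matrix (assumed, not the original data)
distances = [
    [0, 400, 500, 300],
    [400, 0, 300, 500],
    [500, 300, 0, 400],
    [300, 500, 400, 0],
]
path, distance = hill_climbing(distances)
print("Shortest path:", path)
print("Shortest distance:", distance)

Because the starting tour is shuffled, repeated runs can return different rotations of the same optimal tour.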
OUTPUT 2:
Shortest path: [1, 0, 3, 2]
Shortest distance: 1400
8-PUZZLE PROBLEM
Exp. No.: 04 Date:
SOURCE CODE:
from queue import PriorityQueue
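Only the import survives in this listing. The sketch below is an assumed reconstruction of an A*-style solver built on the PriorityQueue import above, using a Manhattan-distance heuristic and the start state implied by Step 1 of the output; the original code may have differed.

GOAL = (1, 2, 3, 4, 5, 6, 7, 8, 0)   # 0 marks the blank tile

def manhattan(state):
    # Sum of Manhattan distances of every tile from its goal cell
    dist = 0
    for idx, tile in enumerate(state):
        if tile:
            goal_idx = tile - 1
            dist += abs(idx // 3 - goal_idx // 3) + abs(idx % 3 - goal_idx % 3)
    return dist

def neighbours(state):
    # States reachable by sliding one adjacent tile into the blank
    result = []
    blank = state.index(0)
    row, col = divmod(blank, 3)
    for dr, dc in ((-1, 0), (1, 0), (0, -1), (0, 1)):
        r, c = row + dr, col + dc
        if 0 <= r < 3 and 0 <= c < 3:
            new = list(state)
            new[blank], new[r * 3 + c] = new[r * 3 + c], new[blank]
            result.append(tuple(new))
    return result

def solve(start):
    # A* search: priority = moves made so far + Manhattan-distance heuristic
    frontier = PriorityQueue()
    frontier.put((manhattan(start), 0, start, [start]))
    seen = {start}
    while not frontier.empty():
        _, g, state, path = frontier.get()
        if state == GOAL:
            return path
        for nxt in neighbours(state):
            if nxt not in seen:
                seen.add(nxt)
                frontier.put((g + 1 + manhattan(nxt), g + 1, nxt, path + [nxt]))
    return None

start = (1, 2, 3, 4, 0, 5, 7, 8, 6)   # start state taken from Step 1 of the output
for step, state in enumerate(solve(start), start=1):
    print("Step", step, ":")
    for i in range(0, 9, 3):
        print(*state[i:i + 3])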
OUTPUT:
Step 1 :
1 2 3
4 0 5
7 8 6
Step 2 :
1 2 3
4 5 0
7 8 6
Step 3 :
1 2 3
4 5 6
7 8 0
A* SEARCH ALGORITHM
Exp. No.: 05 Date:
SOURCE CODE:
import heapq

def astar(graph, start, goal):
    def h(n):
        # Manhattan distance from n to the goal cell, used as the heuristic
        return abs(n[0] - goal[0]) + abs(n[1] - goal[1])
    # Frontier entries: (f = g + h, g, node, path taken so far)
    frontier = [(h(start), 0, start, [start])]
    visited = set()
    while frontier:
        _, g, node, path = heapq.heappop(frontier)
        if node == goal:
            return path
        if node in visited:
            continue
        visited.add(node)
        for neighbour, cost in graph[node].items():
            if neighbour not in visited:
                heapq.heappush(frontier, (g + cost + h(neighbour),
                                          g + cost, neighbour, path + [neighbour]))
    # If we've exhausted all possible paths without reaching the goal, return None
    return None

graph = {
(0, 0): {(0, 1): 1, (1, 0): 1},
(0, 1): {(0, 0): 1, (0, 2): 1},
(0, 2): {(0, 1): 1, (1, 2): 1},
(1, 0): {(0, 0): 1, (2, 0): 1},
(1, 2): {(0, 2): 1, (2, 2): 1},
(2, 0): {(1, 0): 1, (2, 1): 1},
(2, 1): {(2, 0): 1, (2, 2): 1},
(2, 2): {(1, 2): 1, (2, 1): 1},
}
start = (0, 0)
goal = (2, 2)
path = astar(graph, start, goal)
print('Shortest path :', path)
OUTPUT:
Shortest path : [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2)]
TIC-TAC-TOE GAME USING MINIMAX ALGORITHM
Exp. No.: 06 Date:
SOURCE CODE:
import sys

PLAYER = 'X'        # the human player's symbol
OPPONENT = 'O'      # the AI player's symbol
board = [[' ' for _ in range(3)] for _ in range(3)]

def print_board():
    print("---------")
    for i in range(3):
        print("|", end=" ")
        for j in range(3):
            print(board[i][j], end=" ")
        print("|")
    print("---------")

def is_move_left():
    for i in range(3):
        for j in range(3):
            if board[i][j] == ' ':
                return True
    return False

def evaluate():
    # Checking rows for victory
    for row in range(3):
        if board[row][0] == board[row][1] == board[row][2] != ' ':
            if board[row][0] == PLAYER:
                return 10
            elif board[row][0] == OPPONENT:
                return -10
    # Checking columns for victory
    for col in range(3):
        if board[0][col] == board[1][col] == board[2][col] != ' ':
            if board[0][col] == PLAYER:
                return 10
            elif board[0][col] == OPPONENT:
                return -10
    # Checking diagonals for victory
    for diag in ([board[0][0], board[1][1], board[2][2]],
                 [board[0][2], board[1][1], board[2][0]]):
        if diag[0] == diag[1] == diag[2] != ' ':
            if diag[0] == PLAYER:
                return 10
            elif diag[0] == OPPONENT:
                return -10
    # No winner
    return 0
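# The minimax routine itself is not shown in this listing; the sketch below is a
# minimal version consistent with evaluate() above (PLAYER maximises towards +10,
# OPPONENT minimises towards -10). It is an assumed reconstruction, not the
# original code.
def minimax(depth, is_maximizing):
    score = evaluate()
    # Stop recursing once somebody has won or the board is full
    if score != 0 or not is_move_left():
        return score
    if is_maximizing:
        best = -sys.maxsize
        for i in range(3):
            for j in range(3):
                if board[i][j] == ' ':
                    board[i][j] = PLAYER
                    best = max(best, minimax(depth + 1, False))
                    board[i][j] = ' '
        return best
    else:
        best = sys.maxsize
        for i in range(3):
            for j in range(3):
                if board[i][j] == ' ':
                    board[i][j] = OPPONENT
                    best = min(best, minimax(depth + 1, True))
                    board[i][j] = ' '
        return best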
def find_best_move():
    # The AI plays OPPONENT, so it searches for the move with the lowest score
    best_score = sys.maxsize
    best_move = (-1, -1)
    for i in range(3):
        for j in range(3):
            if board[i][j] == ' ':
                board[i][j] = OPPONENT
                score = minimax(0, True)
                board[i][j] = ' '
                if score < best_score:
                    best_score = score
                    best_move = (i, j)
    return best_move
print("Tic-Tac-Toe Game")
print("Enter the coordinates (row, col) to make a move.")
print("Coordinates range from 0 to 2, top-left is (0, 0) and bottom-right is (2, 2).")
print_board()
player_turn = True  # True if it's player's turn, False if it's AI's turn
while is_move_left() and evaluate() == 0:
    if player_turn:
        row = int(input("Enter the row: "))
        col = int(input("Enter the column: "))
        if board[row][col] != ' ':
            print("Invalid move. Try again.")
            print_board()
            continue
        board[row][col] = PLAYER
    else:
        print("AI's turn...")
        move = find_best_move()
        board[move[0]][move[1]] = OPPONENT
    print_board()
    player_turn = not player_turn
score = evaluate()
if score > 0:
    print("Player wins!")
elif score < 0:
    print("AI wins!")
else:
    print("It's a tie!")
OUTPUT:
Tic-Tac-Toe Game
Enter the coordinates (row, col) to make a move.
Coordinates range from 0 to 2, top-left is (0, 0) and bottom-right is (2, 2).
---------
| |
| |
| |
---------
Enter the row: 0
Enter the column: 1
---------
| X |
| |
| |
---------
AI's turn...
---------
| O X |
| |
| |
---------
Enter the row: 1
Enter the column: 1
---------
| O X |
| X |
| |
---------
AI's turn...
---------
| O X |
| X |
| O |
---------
Enter the row: 2
Enter the column: 0
---------
| O X |
| X |
| X O |
---------
AI's turn...
---------
| O X O |
| X |
| X O |
---------
Enter the row: 1
Enter the column: 0
---------
| O X O |
| X X |
| X O |
---------
AI's turn...
---------
| O X O |
| X X O |
| X O |
---------
Enter the row: 0
Enter the column: 1
Invalid move. Try again.
---------
| O X O |
| X X O |
| X O |
---------
Enter the row: 2
Enter the column: 2
---------
| O X O |
| X X O |
| X O X |
---------
It's a tie!
MONTE CARLO TREE SEARCH
Exp. No.: 07 Date:
SOURCE CODE:
import random
import math

class Node:
    def __init__(self, value, parent=None):
        self.value = value          # state: a list of chosen numbers
        self.parent = parent
        self.children = []
        self.wins = 0               # accumulated simulation reward
        self.visits = 0

    def select_child(self):
        # UCB1: balance exploitation (wins/visits) against exploration
        exploration_constant = 1.414
        selected_child = None
        max_ucb = float("-inf")
        for child in self.children:
            ucb = (child.wins / child.visits +
                   exploration_constant * math.sqrt(math.log(self.visits) / child.visits))
            if ucb > max_ucb:
                max_ucb = ucb
                selected_child = child
        return selected_child

    def expand(self):
        # Add one child per possible successor state and return one at random
        for value in self.get_possible_values():
            self.children.append(Node(value, parent=self))
        return random.choice(self.children) if self.children else self

    def update(self, result):
        # Back-propagate the simulation result up to the root
        self.visits += 1
        self.wins += result
        if self.parent:
            self.parent.update(result)

    def get_possible_values(self):
        # Toy problem (assumed): a state is a list of digits, extended one digit
        # at a time up to length 5
        possible_values = []
        if len(self.value) < 5:
            possible_values = [self.value + [d] for d in range(10)]
        return possible_values

class MonteCarloTreeSearch:
    def __init__(self, initial_state):
        self.root = Node(initial_state)

    def run(self, simulations):
        for _ in range(simulations):
            node = self.selection()
            result = self.simulation(node)
            node.update(result)
        best_child = self.root.select_child()
        return best_child.value

    def selection(self):
        node = self.root
        while node.children:
            if all(child.visits for child in node.children):
                node = node.select_child()
            else:
                return random.choice([c for c in node.children if not c.visits])
        return node.expand()

    def simulation(self, node):
        # Random rollout: extend the state with random successors until terminal
        current_node = node
        possible = current_node.get_possible_values()
        while possible:
            current_node = Node(random.choice(possible))
            possible = current_node.get_possible_values()
        # Reward is the sum of the numbers in the terminal state
        return sum(current_node.value)

initial_value = []
mcts = MonteCarloTreeSearch(initial_value)
simulations = 1000
best_value = mcts.run(simulations)
print("Best Value:", best_value)
OUTPUT:
MONTY HALL PROBLEM
Exp. No.: 08 Date:
SOURCE CODE:
import random

def monty_hall():
    # Initialize the doors with car and goats, then shuffle their order
    doors = ['car', 'goat', 'goat']
    random.shuffle(doors)
    # The contestant picks a door at random
    choice = random.randrange(3)
    # The host opens a goat door the contestant did not pick
    revealed = next(i for i in range(3) if i != choice and doors[i] != 'car')
    # The contestant switches to the remaining unopened door
    final_choice = next(i for i in range(3) if i not in (choice, revealed))
    # Return True if the final choice has the car, False otherwise
    return doors[final_choice] == 'car'

def simulate_monty_hall(num_trials):
    wins_with_switch = 0
    wins_without_switch = 0
    for _ in range(num_trials):
        # A trial lost by switching is a trial that staying would have won
        if monty_hall():
            wins_with_switch += 1
        else:
            wins_without_switch += 1
    print("Wins when switching:", wins_with_switch, "out of", num_trials)
    print("Wins when staying:", wins_without_switch, "out of", num_trials)

simulate_monty_hall(10000)  # number of trials assumed for illustration
OUTPUT:
KALMAN FILTER
Exp. No.: 09 Date:
SOURCE CODE:
import numpy as np

class KalmanFilter:
    def __init__(self, initial_state, initial_covariance, process_noise,
                 measurement_noise):
        self.state = initial_state
        self.covariance = initial_covariance
        self.process_noise = process_noise
        self.measurement_noise = measurement_noise

    def predict(self, dt):
        # State prediction with a constant-velocity transition model
        F = np.array([[1, dt],
                      [0, 1]])
        self.state = np.dot(F, self.state)
        # Covariance prediction
        Q = self.process_noise * np.array([[0.25 * dt**4, 0.5 * dt**3],
                                           [0.5 * dt**3, dt**2]])
        self.covariance = np.dot(np.dot(F, self.covariance), F.T) + Q
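    # The measurement-update method is missing from the listing above. The sketch
    # below assumes a position-only measurement model H = [1, 0]; this is an
    # assumption, not necessarily the original design.
    def update(self, measurement):
        # Kalman gain from the predicted covariance and the measurement noise
        H = np.array([[1.0, 0.0]])
        S = np.dot(np.dot(H, self.covariance), H.T) + self.measurement_noise
        K = np.dot(np.dot(self.covariance, H.T), np.linalg.inv(S))
        # Correct the state estimate and covariance with the measurement residual
        y = measurement - np.dot(H, self.state)
        self.state = self.state + np.dot(K, y)
        self.covariance = self.covariance - np.dot(np.dot(K, H), self.covariance)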
# Run the filter on a few illustrative position measurements (values assumed)
kalman_filter = KalmanFilter(np.array([0.0, 0.0]), np.eye(2), 0.01, 1.0)
for measurement in [1.1, 2.0, 2.9, 4.2]:
    kalman_filter.predict(dt=1.0)
    # Update step
    kalman_filter.update(measurement)
    print("Estimated state:", kalman_filter.state)
OUTPUT:
HIDDEN MARKOV MODEL
Exp. No.: 10 Date:
SOURCE CODE:
import numpy as np
from tabulate import tabulate
from hmmlearn import hmm
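The rest of this listing is missing. Below is a minimal sketch that is merely consistent with the imports and with the output that follows: the observed column matches np.random.uniform(0, 100) under np.random.seed(42), and the warning about 14 free parameters matches a 3-state GaussianHMM fitted to 10 points. How the forecasts were actually produced is assumed rather than known.

import numpy as np
from tabulate import tabulate
from hmmlearn import hmm

# Synthetic "observed prices" (seed 42 appears to reproduce the observed column)
np.random.seed(42)
observed_prices = np.random.uniform(0, 100, size=(10, 1))

# Fit a 3-state Gaussian HMM to the price series
model = hmm.GaussianHMM(n_components=3, covariance_type="diag", n_iter=100)
model.fit(observed_prices)

# Sample a sequence of future prices from the fitted model as the forecast
forecasts, _ = model.sample(10)

# Tabulate the observed prices next to the sampled forecasts
rows = list(zip(observed_prices.ravel(), forecasts.ravel()))
print(tabulate(rows, headers=["Observed prices", "Future forecasts"], tablefmt="grid"))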
OUTPUT:
Fitting a model with 14 free scalar parameters with only 10 data points will result
in a degenerate solution.
+-------------------+--------------------+
| Observed prices | Future forecasts |
+===================+====================+
| 37.454 | 22.9751 |
+-------------------+--------------------+
| 95.0714 | 37.5188 |
+-------------------+--------------------+
| 73.1994 | 53.7053 |
+-------------------+--------------------+
| 59.8658 | 60.634 |
+-------------------+--------------------+
| 15.6019 | 71.5652 |
+-------------------+--------------------+
| 15.5995 | 55.7926 |
+-------------------+--------------------+
| 5.80836 | 69.8999 |
+-------------------+--------------------+
| 86.6176 | 71.0359 |
+-------------------+--------------------+
| 60.1115 | 67.5987 |
+-------------------+--------------------+
| 70.8073 | 84.4716 |
+-------------------+--------------------+