from collections import deque

def water_jug_bfs(capacity_x, capacity_y, goal):
    initial_state = (0, 0)
    # Each queue entry holds a state and the path taken to reach it
    queue = deque([(initial_state, [initial_state])])
    visited = set([initial_state])
    while queue:
        (jug1, jug2), path = queue.popleft()
        if jug1 == goal:  # Goal: measure the target amount in jug 1
            return path
        # Amounts that can be poured from one jug into the other
        pour_x_to_y = min(jug1, capacity_y - jug2)
        pour_y_to_x = min(jug2, capacity_x - jug1)
        next_states = [
            (capacity_x, jug2),                        # fill jug 1
            (jug1, capacity_y),                        # fill jug 2
            (0, jug2),                                 # empty jug 1
            (jug1, 0),                                 # empty jug 2
            (jug1 - pour_x_to_y, jug2 + pour_x_to_y),  # pour jug 1 into jug 2
            (jug1 + pour_y_to_x, jug2 - pour_y_to_x),  # pour jug 2 into jug 1
        ]
        for state in next_states:
            if state not in visited:
                visited.add(state)
                queue.append((state, path + [state]))
    return None
def print_solution(path):
if path:
print("Solution path:")
for state in path:
print(f"Jug 1: {state[0]}, Jug 2: {state[1]}")
else:
print("No solution found.")
capacity_x = 4
capacity_y = 3
goal = 2
solution = water_jug_bfs(capacity_x, capacity_y, goal)
print_solution(solution)
Output –
Solution path:
Jug 1: 0, Jug 2: 0
Jug 1: 4, Jug 2: 0
Jug 1: 1, Jug 2: 3
Jug 1: 1, Jug 2: 0
Jug 1: 0, Jug 2: 1
Jug 1: 4, Jug 2: 1
Jug 1: 2, Jug 2: 3
from collections import deque

def is_valid_state(m_left, c_left, m_right, c_right):
    # A bank is unsafe whenever cannibals outnumber the missionaries present on it
    if (m_left < c_left and m_left > 0) or (m_right < c_right and m_right > 0):
        return False
    return True

def get_possible_states(state):
    m_left, c_left, boat_pos = state
    m_right = 3 - m_left
    c_right = 3 - c_left
    next_states = []
    if boat_pos == 0:  # Boat on the left bank: ferry 1-2 people to the right
        if m_left > 0:
            new_state = (m_left - 1, c_left, 1)
            if is_valid_state(new_state[0], new_state[1], m_right + 1, c_right):
                next_states.append(new_state)
        if m_left > 1:
            new_state = (m_left - 2, c_left, 1)
            if is_valid_state(new_state[0], new_state[1], m_right + 2, c_right):
                next_states.append(new_state)
        if c_left > 0:
            new_state = (m_left, c_left - 1, 1)
            if is_valid_state(new_state[0], new_state[1], m_right, c_right + 1):
                next_states.append(new_state)
        if c_left > 1:
            new_state = (m_left, c_left - 2, 1)
            if is_valid_state(new_state[0], new_state[1], m_right, c_right + 2):
                next_states.append(new_state)
        if m_left > 0 and c_left > 0:
            new_state = (m_left - 1, c_left - 1, 1)
            if is_valid_state(new_state[0], new_state[1], m_right + 1, c_right + 1):
                next_states.append(new_state)
    else:  # Boat on the right bank: ferry 1-2 people back to the left
        if m_right > 0:
            new_state = (m_left + 1, c_left, 0)
            if is_valid_state(new_state[0], new_state[1], m_right - 1, c_right):
                next_states.append(new_state)
        if m_right > 1:
            new_state = (m_left + 2, c_left, 0)
            if is_valid_state(new_state[0], new_state[1], m_right - 2, c_right):
                next_states.append(new_state)
        if c_right > 0:
            new_state = (m_left, c_left + 1, 0)
            if is_valid_state(new_state[0], new_state[1], m_right, c_right - 1):
                next_states.append(new_state)
        if c_right > 1:
            new_state = (m_left, c_left + 2, 0)
            if is_valid_state(new_state[0], new_state[1], m_right, c_right - 2):
                next_states.append(new_state)
        if m_right > 0 and c_right > 0:
            new_state = (m_left + 1, c_left + 1, 0)
            if is_valid_state(new_state[0], new_state[1], m_right - 1, c_right - 1):
                next_states.append(new_state)
    return next_states
def bfs():
start_state = (3, 3, 0) # 3 missionaries and 3 cannibals on the left side, boat on the left
goal_state = (0, 0, 1) # All on the right side
queue = deque([(start_state, [])])
visited = set([start_state])
while queue:
current_state, path = queue.popleft()
if current_state == goal_state:
return path
for next_state in get_possible_states(current_state):
if next_state not in visited:
visited.add(next_state)
queue.append((next_state, path + [next_state]))
return None
solution = bfs()
if solution:
print("Solution path:")
for state in solution:
print(state)
else:
print("No solution found.")
Output –
Solution path:
(3, 3, 0)
(2, 3, 1)
(2, 2, 0)
(2, 1, 1)
(3, 1, 0)
(3, 0, 1)
(2, 0, 0)
(1, 0, 1)
(1, 1, 0)
(1, 2, 1)
(0, 2, 0)
(0, 3, 1)
Output –
DFS traversal (recursive) starting from node 0: 0 1 3 4 2
import heapq
class Node:
def __init__(self, name, heuristic):
self.name = name # Node name or identifier
self.heuristic = heuristic # Heuristic value
self.neighbors = [] # List to store neighboring nodes
def add_neighbor(self, neighbor):
self.neighbors.append(neighbor)
def __lt__(self, other):
return self.heuristic < other.heuristic
def best_first_search(start, goal):
open_list = [] # Priority queue for nodes to explore
heapq.heappush(open_list, start)
visited = set() # Set to keep track of visited nodes
while open_list:
current_node = heapq.heappop(open_list) # Get node with lowest heuristic
if current_node in visited:
continue
print(f"Visiting Node: {current_node.name} with heuristic: {current_node.heuristic}")
if current_node == goal:
print(f"Goal Node {goal.name} reached!")
return
visited.add(current_node)
# Add neighbors to open list
for neighbor in current_node.neighbors:
if neighbor not in visited:
heapq.heappush(open_list, neighbor)
start_node = Node("Start", 10)
goal_node = Node("Goal", 0)
nodeA = Node("A", 7)
nodeB = Node("B", 4)
nodeC = Node("C", 3)
nodeD = Node("D", 5)
start_node.add_neighbor(nodeA)
start_node.add_neighbor(nodeB)
nodeA.add_neighbor(nodeC)
nodeB.add_neighbor(nodeD)
nodeC.add_neighbor(goal_node)
nodeD.add_neighbor(goal_node)
best_first_search(start_node, goal_node)
Output –
Visiting Node: Start with heuristic: 10
Visiting Node: B with heuristic: 4
Visiting Node: D with heuristic: 5
Visiting Node: Goal with heuristic: 0
Goal Node Goal reached!
import heapq
class Node:
def __init__(self, name, x, y, goal=None):
self.name, self.x, self.y = name, x, y
self.g, self.f, self.parent = float('inf'), float('inf'), None
self.heuristic = abs(x - goal.x) + abs(y - goal.y) if goal else 0
self.neighbors = []
def __lt__(self, other):
return self.f < other.f
def reconstruct_path(goal):
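    # Follow parent links back from the goal node, then reverse to obtain the start-to-goal order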
path = []
while goal:
path.append(goal.name)
goal = goal.parent
return path[::-1]
def a_star_search(start, goal):
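    # Expand the open node with the lowest f = g + h, where g is the cost so far and h the Manhattan-distance heuristic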
open_list = [start]
start.g, start.f = 0, start.heuristic
while open_list:
current = heapq.heappop(open_list)
if current == goal:
print("Path:", reconstruct_path(current))
return
for neighbor in current.neighbors:
if neighbor.g > current.g + 1:
                neighbor.g, neighbor.f, neighbor.parent = current.g + 1, current.g + 1 + neighbor.heuristic, current
if neighbor not in open_list:
heapq.heappush(open_list, neighbor)
print("No path found.")
start, goal = Node("Start", 0, 0), Node("Goal", 4, 4)
nodes = {name: Node(name, x, y, goal) for name, (x, y) in
[("A", (1, 0)), ("B", (0, 1)), ("C", (1, 1)), ("D", (2, 1)),
("E", (3, 1)), ("F", (2, 2)), ("G", (3, 2)), ("H", (3, 3))]}
start.neighbors = [nodes["A"], nodes["B"]]
nodes["A"].neighbors = [nodes["C"]]
nodes["B"].neighbors = [nodes["C"]]
nodes["C"].neighbors = [nodes["D"], nodes["F"]]
nodes["D"].neighbors = [nodes["E"]]
nodes["E"].neighbors = [nodes["G"]]
nodes["F"].neighbors = [nodes["G"]]
nodes["G"].neighbors = [nodes["H"]]
nodes["H"].neighbors = [goal]
# Run A* search
a_star_search(start, goal)
Output –
Path: ['Start', 'B', 'C', 'F', 'G', 'H', 'Goal']
import random
def hill_climbing(initial_state, get_neighbors, evaluate):
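    # Greedily move to the best-scoring neighbor; stop as soon as no neighbor improves on the current value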
current_state = initial_state
current_value = evaluate(current_state)
while True:
neighbors = get_neighbors(current_state)
if not neighbors:
break
next_state = max(neighbors, key=lambda state: evaluate(state))
next_value = evaluate(next_state)
if next_value <= current_value:
break
current_state = next_state
current_value = next_value
return current_state, current_value
def evaluate(x):
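    # Objective function: a downward-opening parabola whose maximum value of 5 is reached at x = 3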
return -(x - 3)**2 + 5
def get_neighbors(x):
return [x - 1, x + 1] # Neighbors will be one less and one more than current state
initial_state = 0 # Start at x = 0
best_state, best_value = hill_climbing(initial_state, get_neighbors, evaluate)
print(f"Best state: {best_state}")
print(f"Best value: {best_value}")
Output –
Best state: 3
Best value: 5
class MeansEndAnalysis:
    def __init__(self, initial_state, goal_state):
        self.current_state = initial_state
        self.goal_state = goal_state
    def goal_test(self):
        return self.current_state == self.goal_state
    def best_action(self):
        actions = [
            ("add_3", self.current_state + 3),
            ("subtract_2", self.current_state - 2)
        ]
        return min(actions, key=lambda x: abs(x[1] - self.goal_state))
    def solve(self):
        steps = []
        while not self.goal_test():
            action, new_state = self.best_action()
            self.current_state = new_state
            steps.append(f"Action: {action}, New State: {self.current_state}")
        steps.append(f"Goal reached: {self.current_state}")
        return steps
solver = MeansEndAnalysis(5, 12)
for step in solver.solve():
    print(step)
Output –
Action: add_3, New State: 8
Action: add_3, New State: 11
Action: add_3, New State: 12
Goal reached: 12
# Min-Max Algorithm Implementation
def minimax(depth, node_index, is_max_turn, node_values):
    if depth == 3:  # Leaf nodes are at depth 3
        return node_values[node_index]
    if is_max_turn:
        best = -float('inf')  # Initially, the best value is very low for the maximizing player
        for i in range(2):  # Explore both child nodes
            val = minimax(depth + 1, node_index * 2 + i, False, node_values)  # Recurse for the minimizing player's turn
            best = max(best, val)
        return best
    else:
        best = float('inf')  # Initially, the best value is very high for the minimizing player
        for i in range(2):  # Explore both child nodes
            val = minimax(depth + 1, node_index * 2 + i, True, node_values)  # Recurse for the maximizing player's turn
            best = min(best, val)
        return best
leaf_node_values = [3, 5, 2, 9, 4, 6, 8, 1]  # Terminal node values
optimal_value = minimax(0, 0, True, leaf_node_values)
print("The optimal value is:", optimal_value)
Output –
The optimal value is: 6