Artificial Intelligence Lab Record

SHREE MEDHA DEGREE COLLEGE, BALLARI

1. Python program on Problem Solving by Searching: Breadth-first Search.

from collections import deque

# Define the graph as an adjacency list
graph = {
    'A': ['B', 'C'],
    'B': ['D', 'E'],
    'C': ['F'],
    'D': [],
    'E': ['F'],
    'F': []
}

def bfs(graph, start):
    visited = set()
    queue = deque([start])
    visited.add(start)

    while queue:
        node = queue.popleft()
        print(node, end=' ')

        for neighbor in graph[node]:
            if neighbor not in visited:
                visited.add(neighbor)
                queue.append(neighbor)

# Example usage:
print("BFS traversal starting from node 'A':")
bfs(graph, 'A')

OUTPUT:

BFS traversal starting from node 'A':
A B C D E F
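The printed order is the level-order of the graph: A first, then its neighbours B and C, then D and E, and finally F. As a quick check (a small sketch reusing the graph defined above; not part of the recorded program), the same traversal can collect the visit order into a list instead of printing it:

from collections import deque

def bfs_order(graph, start):
    # Same traversal as bfs(), but returns the visit order.
    visited, order, queue = {start}, [], deque([start])
    while queue:
        node = queue.popleft()
        order.append(node)
        for neighbor in graph[node]:
            if neighbor not in visited:
                visited.add(neighbor)
                queue.append(neighbor)
    return order

print(bfs_order(graph, 'A'))   # ['A', 'B', 'C', 'D', 'E', 'F']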


2. Python program on Problem Solving by Searching: Depth-first Search.

# Define the graph as an adjacency list
graph = {
    'A': ['B', 'C'],
    'B': ['D', 'E'],
    'C': ['F'],
    'D': [],
    'E': ['F'],
    'F': []
}

def dfs(graph, start, visited=None):
    if visited is None:
        visited = set()

    visited.add(start)
    print(start, end=' ')

    for neighbor in graph[start]:
        if neighbor not in visited:
            dfs(graph, neighbor, visited)

# Example usage:
print("DFS traversal starting from node 'A':")
dfs(graph, 'A')

OUTPUT:

DFS traversal starting from node 'A':
A B D E F C
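Each node is printed exactly once because of the visited set: the search goes deep along A, B, D, backtracks to E, reaches F, and finally returns to A's remaining neighbour C. The same traversal can also be written without recursion (a minimal sketch using an explicit stack and the graph above; not part of the recorded program):

def dfs_iterative(graph, start):
    visited = set()
    stack = [start]
    order = []
    while stack:
        node = stack.pop()
        if node not in visited:
            visited.add(node)
            order.append(node)
            # Push neighbours in reverse so they are visited in the original order.
            for neighbor in reversed(graph[node]):
                if neighbor not in visited:
                    stack.append(neighbor)
    return order

print(dfs_iterative(graph, 'A'))   # ['A', 'B', 'D', 'E', 'F', 'C']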


3. Python program on Problem Solving by Searching: Greedy best-first search.

from queue import PriorityQueue

# Define the graph as an adjacency list with edge costs
graph = {
    'A': {'B': 3, 'C': 7},
    'B': {'D': 2, 'E': 5},
    'C': {'F': 4},
    'D': {},
    'E': {'F': 1},
    'F': {}
}

# Heuristic values: estimated distance from each node to the goal 'F'
# (same values as in the A* program below)
heuristic = {'A': 10, 'B': 8, 'C': 5, 'D': 4, 'E': 3, 'F': 0}

def greedy_best_first_search(graph, start, goal):
    frontier = PriorityQueue()
    frontier.put((heuristic[start], start))   # queue entries are (priority, node) pairs
    came_from = {}
    came_from[start] = None

    while not frontier.empty():
        _, current = frontier.get()

        if current == goal:
            break

        for next_node in graph[current]:
            if next_node not in came_from:
                priority = heuristic[next_node]   # greedy: order only by the heuristic
                frontier.put((priority, next_node))
                came_from[next_node] = current

    return came_from

def reconstruct_path(came_from, start, goal):
    current = goal
    path = []
    while current != start:
        path.append(current)
        current = came_from[current]
    path.append(start)
    path.reverse()
    return path


# Example usage:
start_node = 'A'
goal_node = 'F'
came_from = greedy_best_first_search(graph, start_node, goal_node)
path = reconstruct_path(came_from, start_node, goal_node)
print("Greedy Best-First Search path from", start_node, "to", goal_node, ":", path)

OUTPUT:

Greedy Best-First Search path from A to F : ['A', 'C', 'F']
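A quick check (a small sketch using the edge costs in graph above; not part of the recorded program) shows why greedy best-first search is not guaranteed to return the cheapest route: it follows only the heuristic, and here the chosen path A, C, F costs more than the path A, B, E, F that A* finds in the next program.

greedy_cost = graph['A']['C'] + graph['C']['F']                       # 7 + 4 = 11
optimal_cost = graph['A']['B'] + graph['B']['E'] + graph['E']['F']    # 3 + 5 + 1 = 9
print(greedy_cost, optimal_cost)   # 11 9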


4. Python program on Problem Solving by Searching: A* Search.

import heapq

# Define the graph as an adjacency list with edge costs
graph = {
    'A': {'B': 3, 'C': 7},
    'B': {'D': 2, 'E': 5},
    'C': {'F': 4},
    'D': {},
    'E': {'F': 1},
    'F': {}
}

def heuristic(node, goal):
    # Estimated distance from the node to the goal
    heuristic_values = {'A': 10, 'B': 8, 'C': 5, 'D': 4, 'E': 3, 'F': 0}
    return heuristic_values[node]

def astar_search(graph, start, goal):
    frontier = []
    heapq.heappush(frontier, (0, start))
    came_from = {}
    cost_so_far = {}
    came_from[start] = None
    cost_so_far[start] = 0

    while frontier:
        current_cost, current_node = heapq.heappop(frontier)

        if current_node == goal:
            break

        for next_node, weight in graph[current_node].items():
            new_cost = cost_so_far[current_node] + weight
            if next_node not in cost_so_far or new_cost < cost_so_far[next_node]:
                cost_so_far[next_node] = new_cost
                priority = new_cost + heuristic(next_node, goal)
                heapq.heappush(frontier, (priority, next_node))
                came_from[next_node] = current_node

    return came_from, cost_so_far

def reconstruct_path(came_from, start, goal):
    current = goal
    path = []
    while current != start:
        path.append(current)
        current = came_from[current]
    path.append(start)
    path.reverse()
    return path

# Example usage:
start_node = 'A'
goal_node = 'F'
came_from, cost_so_far = astar_search(graph, start_node, goal_node)
path = reconstruct_path(came_from, start_node, goal_node)
print("A* Search path from", start_node, "to", goal_node, ":", path)
print("Cost of the path:", cost_so_far[goal_node])

OUTPUT:

A* Search path from A to F : ['A', 'B', 'E', 'F']
Cost of the path: 9
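As a quick check (a small sketch reusing the graph and the returned path; not part of the recorded program), the reported cost can be recomputed by summing the edge weights along the path:

path = ['A', 'B', 'E', 'F']
print(sum(graph[u][v] for u, v in zip(path, path[1:])))   # 3 + 5 + 1 = 9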


5. Python program on Problem Solving by Searching: AO* search - Informed (Heuristic) Search Strategies.

class Graph:
    def __init__(self):
        self.graph = {}

    def add_edge(self, u, v, cost):
        if u not in self.graph:
            self.graph[u] = []
        self.graph[u].append((v, cost))

    def ao_star_search(self, start, goal):
        open_list = [(0, start)]
        closed_set = set()
        g_values = {start: 0}
        parents = {start: None}  # To store parent nodes for path reconstruction
        while open_list:
            f_current, current_node = min(open_list)
            open_list.remove((f_current, current_node))
            closed_set.add(current_node)
            if current_node == goal:
                # Reconstruct path
                path = []
                while current_node is not None:
                    path.append(current_node)
                    current_node = parents[current_node]
                path.reverse()
                return path
            for neighbor, weight in self.graph.get(current_node, []):
                if neighbor not in closed_set:
                    g = g_values[current_node] + weight
                    h = self.heuristic(neighbor, goal)
                    f = g + h
                    if neighbor not in g_values or g < g_values[neighbor]:
                        g_values[neighbor] = g
                        parents[neighbor] = current_node  # Update parent node
                        open_list.append((f, neighbor))
        return None  # No path found

    def heuristic(self, node, goal):
        # Example heuristic: Euclidean distance
        return ((node[0] - goal[0]) ** 2 + (node[1] - goal[1]) ** 2) ** 0.5

# Example usage
if __name__ == "__main__":
    g = Graph()
    g.add_edge((0, 0), (1, 0), 1)
    g.add_edge((0, 0), (0, 1), 4)
    g.add_edge((1, 0), (1, 1), 3)
    g.add_edge((0, 1), (1, 1), 1)
    g.add_edge((1, 1), (2, 1), 2)

    start_node = (0, 0)
    goal_node = (2, 1)
    # goal_node = (3, 3)  # Set an unreachable goal for demonstration
    path = g.ao_star_search(start_node, goal_node)
    if path:
        print("Path found:", path)
        print("Goal Reached.")
    else:
        print("Unable to find path")
        print("Goal is unreachable.")

OUTPUT:

Path found: [(0, 0), (1, 0), (1, 1), (2, 1)]
Goal Reached.
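A short check (a small sketch using the edge costs added above; not part of the recorded program) confirms the returned route is the cheaper of the two ways to reach the goal:

route_via_1_0 = 1 + 3 + 2   # (0, 0) -> (1, 0) -> (1, 1) -> (2, 1)
route_via_0_1 = 4 + 1 + 2   # (0, 0) -> (0, 1) -> (1, 1) -> (2, 1)
print(route_via_1_0, route_via_0_1)   # 6 7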


6. Python program to demonstrate supervised machine learning.

# Import required libraries
import numpy as np
from sklearn.linear_model import LinearRegression
X = np.array([[1, 2], [2, 3], [3, 4], [4, 5]])
Y = np.dot(X, np.array([3, 2])) + 7
# Create a linear regression model
model = LinearRegression()
# Train the model using the input data and output labels
model.fit(X, Y)
# Now, let's predict the output for new input data
new_input = np.array([[5, 6]])
predicted_output = model.predict(new_input)
print(f"Predicted output for new input: {predicted_output[0]}")

OUTPUT:

Predicted output for new input: 34.0
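The result can be verified by hand (a short check, not part of the recorded program): the labels were generated with y = 3*x1 + 2*x2 + 7, so for the input [5, 6] the expected value is 3*5 + 2*6 + 7 = 34, which matches the model's prediction.

print(3 * 5 + 2 * 6 + 7)   # 34, same as the model's prediction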


7. Python program to predict the price of a car using a decision tree.

from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import train_test_split
import numpy as np

# Sample car data (replace with your actual data)
data = {
    'mileage': [20000, 40000, 15000, 35000],
    'year': [2018, 2015, 2020, 2017],
    'model': ["Acura", "Toyota", "BMW", "Honda"],
    'price': [25000, 18000, 30000, 22000]
}

# Convert data to NumPy arrays (the 'model' column is not used as a feature here)
features = np.array([data['mileage'], data['year']]).T
labels = np.array(data['price'])

# Split data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(features, labels,
                                                    test_size=0.2, random_state=42)
# Create and train the decision tree model
model = DecisionTreeRegressor()
model.fit(X_train, y_train)
# Predict price for a new car (replace with values)
new_car = [50000, 2012]
# Make prediction
predicted_price = model.predict([new_car])[0]
# Print result
print(f"Predicted price for new car: ${round(predicted_price, 2)}")

OUTPUT:

Predicted price for new car: $22000.0
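Since the data was already split into training and test sets, the held-out sample can also be compared against the model's prediction (a small sketch reusing the variables above; with only four rows this merely demonstrates the evaluation step):

print("Test sample prediction:", model.predict(X_test)[0], "actual:", y_test[0])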


8. Python program of a weather prediction model that predicts whether or not there will be rain on a particular day.

from sklearn.tree import DecisionTreeClassifier

# Sample weather data: [temperature, pressure, rain?] (replace with real data collection)
data = [
    [10, 70, False],  # Sunny, High Pressure, No Rain
    [15, 60, True],   # Cloudy, Medium Pressure, Rain
    [5, 90, False],   # Foggy, High Pressure, No Rain
    [20, 50, True],   # Rainy, Low Pressure, Rain
]

# Feature and target names (for reference only; not used by the model below)
features = ["Temperature", "Pressure"]
target = ["Rain"]

# Use temperature and pressure as numerical features, rain as the label
X = [[d[0], d[1]] for d in data]
y = [d[2] for d in data]
# Train the decision tree model
model = DecisionTreeClassifier()
model.fit(X, y)
# Predict rain for a new day (replace with actual values)
new_day = [18, 65]
# Make prediction
prediction = model.predict([new_day])[0]
# Print result
rain_text = "Rain" if prediction else "No Rain"
print(f"Prediction for new day: {rain_text}")

OUTPUT:

Prediction for new day: Rain
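To see which thresholds the fitted tree actually learned, it can be inspected with scikit-learn's export_text helper (a small sketch reusing the trained model; not part of the recorded program):

from sklearn.tree import export_text
print(export_text(model, feature_names=["Temperature", "Pressure"]))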


9. Python program of a profit prediction model that states the probable profit that can be generated from the sale of a product.

import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
import seaborn as sns
import matplotlib.pyplot as plt
# Load the dataset
data = pd.read_csv("startup_data.csv")
# Summary statistics of the data
print(data.describe())
# Correlation between features
data_no_state = data.drop('State', axis=1)
sns.heatmap(data_no_state.corr(), annot=True)
plt.show()
# Prepare the data for the model
X = data[["R&D Spend", "Administration cost", "Marketing cost"]]
y = data["Profit"]
# Train the linear regression model
model = LinearRegression()
model.fit(X, y)
# Predict the profit
def predict_profit(rnd_spend, admin_cost, marketing_cost):
    X_new = np.array([[rnd_spend, admin_cost, marketing_cost]])
    return model.predict(X_new)[0]
# Example usage
rnd_spend = 100000
admin_cost = 50000
marketing_cost = 200000
predicted_profit = predict_profit(rnd_spend, admin_cost, marketing_cost)
print(f"Predicted profit: ${predicted_profit:.2f}")


OUTPUT:

           R&D Spend  Administration cost  Marketing cost   Profit
count             35                   35              35       35
mean           72900               123758          200734   107673
std            49501                26136          132414    24658
min                0                51283               0    66514
25%            28709               114754           99134    93409
50%            76253               127056          201126   104055
75%           117233               145303          299200   114689
max           165349               156547          471784   179658

Predicted profit: $139336.77
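To see how each spending category contributes to the predicted profit, the fitted coefficients can be printed (a small sketch reusing the trained model; not part of the recorded program):

for name, coef in zip(X.columns, model.coef_):
    print(f"{name}: {coef:.4f}")
print(f"Intercept: {model.intercept_:.2f}")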


10. Python program to classify emails as spam or not spam.

from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import CountVectorizer
# Sample email data (replace with your data loading)
emails = ["This is a normal email", "Click this link to win a prize!",
"Important update from your bank"]
labels = ["not spam", "spam", "not spam"]
# Feature extraction (convert text to numerical features)
vectorizer = CountVectorizer()
features = vectorizer.fit_transform(emails)
# Train the Naive Bayes model
model = MultinomialNB()
model.fit(features, labels)
# Classify a new email
new_email = "Free money just for you!"
new_features = vectorizer.transform([new_email])
prediction = model.predict(new_features)[0]
# Print the prediction
print(f"New email is classified as: {prediction}")

OUTPUT:

New email is classified as: not spam
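The new email shares no vocabulary with the training examples, so the model falls back mostly on the class frequencies (two of the three training emails are labelled "not spam"). The class probabilities can be inspected directly (a small sketch reusing the fitted model; not part of the recorded program):

probabilities = model.predict_proba(new_features)[0]
print(dict(zip(model.classes_, probabilities)))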


11. Python program that demonstrates how to classify flowers using a Support Vector Machine (SVM) classifier.

from sklearn.datasets import load_iris
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
# Load the Iris dataset
iris=load_iris()
# Separate features (X) and target labels (y)
X = iris.data
y = iris.target
# Split data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Create and train the SVM classifier
clf=SVC(kernel="linear")
clf.fit(X_train, y_train)
# Predict flower type for a new sample
new_flower = [5.1, 3.5, 1.4, 0.2]
prediction = clf.predict([new_flower])[0]
# Print the predicted flower type
flower_names = iris.target_names[prediction]
print(f"Predicted flower type: {flower_names}")

OUTPUT:

Predicted flower type: setosa
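The sample [5.1, 3.5, 1.4, 0.2] is the first record of the Iris dataset, which is indeed a setosa. Since the data was already split, the classifier's accuracy on the held-out test set can also be reported (a small sketch reusing the variables above; not part of the recorded program):

print(f"Test accuracy: {clf.score(X_test, y_test):.2f}")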


12. Python program that demonstrates how to use a basic Artificial Neural
Network (ANN) to classify students based on their height and weight.
# Import necessary libraries
import numpy as np
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
# Sample dataset
heights = [155, 160, 165, 170, 175, 180, 185, 190, 195, 200]
weights = [50, 55, 60, 65, 70, 75, 80, 85, 90, 95]
labels = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1] # 0: underweight, 1: normal weight
# Combine data into a single array
data = np.column_stack((heights, weights))
# Split data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(data, labels, test_size=0.2,
random_state=42)
# Create and train an ANN classifier
ann = MLPClassifier(hidden_layer_sizes=(5,), max_iter=1000)
ann.fit(X_train, y_train)
# Evaluate the ANN classifier
accuracy = ann.score(X_test, y_test)
print(f"Accuracy: {accuracy:.2f}")
# Use the ANN classifier to make predictions
predictions = ann.predict(X_test)
# Plot the data and predictions
plt.scatter(X_test[:, 0], X_test[:, 1], c=predictions)
plt.xlabel("Height (cm)")

plt.ylabel("Weight (kg)")
plt.title("Student Classification")
plt.show()

OUTPUT:

(The program prints the accuracy on the held-out test set and then displays a scatter plot of the test samples, coloured by predicted class, with height on the x-axis and weight on the y-axis.)

13. Python program that demonstrates text classification using scikit-learn and a Naive Bayes classifier.

from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import TfidfVectorizer
# Sample text data (replace with your data)
documents = ["This movie was absolutely amazing!",
"The restaurant food was very disappointing.",
"I would recommend this book to everyone."]
labels = ["positive", "negative", "positive"]
# Feature extraction with TF-IDF
vectorizer = TfidfVectorizer()
features = vectorizer.fit_transform(documents)
# Train the Naive Bayes model
model = MultinomialNB()
model.fit(features, labels)
# Classify a new piece of text
new_text = "This product is terrible."
new_features = vectorizer.transform([new_text])
prediction = model.predict(new_features)[0]
# Print the prediction
print(f"New text classified as: {prediction}")

OUTPUT:

New text classified as: positive
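The only word of the new sentence that appears in the training vocabulary is "this", which occurs only in the two positive examples, so the model labels a clearly negative sentence as positive. The class probabilities make this visible (a small sketch reusing the fitted model; not part of the recorded program):

probabilities = model.predict_proba(new_features)[0]
print(dict(zip(model.classes_, probabilities)))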


14. Python program using the speech recognition library to perform speech recognition.

import speech_recognition as sr

def speech_to_text():
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print("Please say something:")
        audio = r.listen(source)
    try:
        voice_data = r.recognize_google(audio)
        print(voice_data)
    except sr.UnknownValueError:
        print("Could not understand audio")
    except sr.RequestError as e:
        print("Could not request results from Google Speech Recognition service; {0}".format(e))

# Run the recognizer once
speech_to_text()

OUTPUT:

Please say something:
hello


15. Python program using the PIL (Pillow) library to illustrate basic image
processing operations like opening an image, resizing it, applying a filter,
and saving the processed image.

from PIL import Image, ImageFilter

# Open an image
image_path = "your_image.jpg"
original_image = Image.open(image_path)

# Resize the image
new_size = (300, 200)  # Specify the new width and height
resized_image = original_image.resize(new_size)

# Apply a filter (e.g., Gaussian blur)
blurred_image = resized_image.filter(ImageFilter.GaussianBlur(radius=5))

# Save the processed image
output_path = "processed_image.jpg"
blurred_image.save(output_path)
print("Image processing complete. Saved as", output_path)

OUTPUT:

Image processing complete. Saved as processed_image.jpg
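Other common Pillow operations can be chained in the same way (a small sketch under the same assumption that your_image.jpg exists; not part of the recorded program):

gray_image = original_image.convert("L")   # convert to grayscale
rotated_image = gray_image.rotate(90)      # rotate 90 degrees counter-clockwise
rotated_image.save("gray_rotated.jpg")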

