
J.N.T.U.H. COLLEGE OF ENGINEERING
KUKATPALLY, HYDERABAD – 500085

CERTIFICATE
Certified that this is a bonafide record of the practical work
done during the academic year 2024-2025 by
Name : RUPIREDDY ANJULA REDDY
Roll Number : 22011A0814 Class: III BTech I SEM
in the Artificial Intelligence Laboratory of the Department of
COMPUTER SCIENCE AND ENGINEERING.

Signature of Staff Member Signature of HOD


Date of Examination:
LIST OF PROGRAMS

S.No  List of Programs                        Pg.No  Sign.
1     Basic Python Programming
2     Linear Regression
3     Logistic Regression
4     Gradient Descent
5     Perform and Plot Overfitting
6     Implementation of KNN Classification
7     Implementation of K-means Clustering
1)BASIC PYTHON PROGRAMMING
1. Use for, split(), and if to create a statement that will print out words that start with 's'.
CODE:
st = 'print only words that start with s in this sentence'
for word in st.split():
    if word[0] == 's':
        print(word)

Output:
start
s
sentence

2. Use range() to print all the even numbers from 0 to 10.


CODE:
print(list(range(0, 11, 2)))

Output:
[0, 2, 4, 6, 8, 10]
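
The same list can also be built with a comprehension (an equivalent alternative, not part of the original record):

print([n for n in range(11) if n % 2 == 0])   # [0, 2, 4, 6, 8, 10]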

3. Write a program that prints the integers from 1 to 100. But for multiples of
three print "Fizz" instead of the number, and for the multiples of five print
"Buzz". For numbers which are multiples of both three and five print
"FizzBuzz".
CODE:
for num in range(1, 101):
    if num % 3 == 0 and num % 5 == 0:
        print("FizzBuzz")
    elif num % 3 == 0:
        print("Fizz")
    elif num % 5 == 0:
        print("Buzz")
    else:
        print(num)
Output:

1
2
Fizz
4
Buzz
Fizz
7
8
Fizz
Buzz
11
Fizz
13
14
FizzBuzz
16
17
Fizz
19
Buzz
Fizz
22
23
Fizz
Buzz
26
Fizz
28
29
FizzBuzz
31
32
Fizz
34
Buzz
Fizz
37
38
Fizz
Buzz
41
Fizz
43
44

FizzBuzz
46
47
Fizz
49
Buzz
Fizz
52
53
Fizz
Buzz
56
Fizz
58
59
FizzBuzz
61
62
Fizz
64
Buzz
Fizz
67
68
Fizz
Buzz
71
Fizz
73
74
FizzBuzz
76
77
Fizz
79
Buzz
Fizz
82
83
Fizz
Buzz
86
Fizz
88
89
FizzBuzz
91
92
Fizz
94
Buzz

Fizz
97
98
Fizz
Buzz
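
For reference, the same loop can be written more compactly (an alternative sketch, not part of the original record):

for num in range(1, 101):
    # Multiplying a string by a boolean yields "" when the test fails
    out = "Fizz" * (num % 3 == 0) + "Buzz" * (num % 5 == 0)
    print(out or num)  # an empty string is falsy, so plain numbers fall through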

4. Write a program to check whether a given number is a palindrome or not.
CODE:
def check_palindrome(n):
    reverse = 0
    temp = n
    while temp != 0:
        reverse = reverse * 10 + temp % 10
        temp = temp // 10
    return reverse == n

n = 12321
if check_palindrome(n):
    print("Yes")
else:
    print("No")

OUTPUT:
Yes
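
For reference, the same check can be written on the string form of the number (an alternative sketch, not part of the original record):

def check_palindrome_str(n):
    # A number reads the same forwards and backwards iff its digit string does
    s = str(n)
    return s == s[::-1]

print(check_palindrome_str(12321))  # prints True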

5. To test whether the given number is in a given range or not.
CODE:
def test_range(n):
    if n in range(3, 9):
        print("%s is in the range" % str(n))
    else:
        print("The number is outside the given range")

test_range(5)

OUTPUT:
5 is in the range

2.LINEAR REGRESSION

CODE:
import matplotlib.pyplot as plt

x = [10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
y = [11, 13, 12, 15, 17, 18, 18, 19, 20, 22]
plt.scatter(x, y)
plt.show()

Output:
(scatter plot of x against y)
# 2. Plotting the best-fit line
import matplotlib.pyplot as plt
from scipy import stats

x = [10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
y = [11, 13, 12, 15, 17, 18, 18, 19, 20, 22]

slope, intercept, r, p, std_err = stats.linregress(x, y)

def myfunc(x):
    return slope * x + intercept

mymodel = list(map(myfunc, x))
plt.scatter(x, y)
plt.plot(x, mymodel)
plt.show()
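
stats.linregress also returns the correlation coefficient r, which gives a quick numeric check of the fit (a small addition for reference, not part of the original record):

print(r)   # values near 1 or -1 indicate a strong linear relationship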

Output:
(scatter plot with the fitted regression line)
3.LOGISTIC REGRESSION
CODE:
import numpy
import matplotlib.pyplot as plt
from sklearn import linear_model

X = numpy.array([3.78, 2.44, 2.09, 0.14, 1.72, 1.65, 4.92, 4.37, 4.96, 4.52, 3.69, 5.88]).reshape(-1, 1)
y = numpy.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1])
plt.scatter(X, y)
plt.show()

logr = linear_model.LogisticRegression()
logr.fit(X, y)

# Predict whether a tumor is cancerous when its size is 3.69 mm:
predicted = logr.predict(numpy.array([3.69]).reshape(-1, 1))
print(predicted)
Output:

[1]
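
To see how confident the model is, the class probabilities can also be inspected (a small optional addition, not part of the original record):

# Probability of [not cancerous, cancerous] for a 3.69 mm tumor
print(logr.predict_proba(numpy.array([3.69]).reshape(-1, 1)))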

4.GRADIENT DESCENT
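This program fits a line y = wx + b by gradient descent on the mean squared error. For reference, each iteration applies the standard updates (with learning rate \eta):

J(w, b) = \frac{1}{n} \sum_{i=1}^{n} \left( y_i - (w x_i + b) \right)^2
w \leftarrow w - \eta \frac{\partial J}{\partial w} = w + \frac{2\eta}{n} \sum_i x_i (y_i - \hat{y}_i)
b \leftarrow b - \eta \frac{\partial J}{\partial b} = b + \frac{2\eta}{n} \sum_i (y_i - \hat{y}_i)

(The code below calls the intercept b "curr_error".)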
CODE:
# Importing the required libraries
import numpy as np
import matplotlib.pyplot as plt

# Function to calculate the mean squared error
def mean_sq_error(y_true, y_pred):
    cost = np.sum((y_true - y_pred) ** 2) / len(y_true)
    return cost

# Implementing the gradient descent function
def gradient_descent(x, y, iter=1000, learning_rate=0.0001, threshold=1e-6):
    curr_weight = 0.1
    curr_error = 0.01  # intercept (bias) term of the fitted line
    n = float(len(x))
    cost = []
    weight = []
    prev_cost = None
    for i in range(iter):
        # Calculating the predicted values
        y_pred = (curr_weight * x) + curr_error
        # Calculating the current cost
        curr_cost = mean_sq_error(y, y_pred)
        # Stopping condition
        if prev_cost and abs(prev_cost - curr_cost) <= threshold:
            break
        prev_cost = curr_cost
        cost.append(curr_cost)
        weight.append(curr_weight)
        # Calculating gradients
        derivative_weight = -(2 / n) * sum(x * (y - y_pred))
        derivative_error = -(2 / n) * sum(y - y_pred)
        # Updating the weight and intercept
        curr_weight = curr_weight - (learning_rate * derivative_weight)
        curr_error = curr_error - (learning_rate * derivative_error)
        # Printing parameters every 500 iterations
        if (i + 1) % 500 == 0:
            print(f"At iteration {i + 1}: Cost = {curr_cost}, Weight = {curr_weight}, Error = {curr_error}")
    # Plotting cost vs weight
    plt.figure(figsize=(8, 6))
    plt.plot(weight, cost)
    plt.scatter(weight, cost, marker='o', color='red')
    plt.title("Cost vs Weight For the Linear Regression Model")
    plt.ylabel("Cost")
    plt.xlabel("Weight")
    plt.show()
    return curr_weight, curr_error
# Data
X = np.array([31.502527, 52.426403, 60.53035803, 49.47563963, 55.81320787,
              50.14218841, 55.21179669, 37.29956669, 49.10504169, 55.55001444,
              40.41973014, 55.35163488, 49.1640495, 59.16847072, 54.72720806,
              49.95588857, 47.68719623, 63.29732685, 49.61864377, 39.81681754])

Y = np.array([35.70700585, 69.77759598, 60.5623823, 74.54663223, 89.23092513,
              79.21151827, 75.64197305, 54.17148932, 79.3312423, 70.30087989,
              52.16567715, 85.47884676, 60.00892325, 79.39287043, 84.43619216,
              65.72360244, 85.89250373, 99.37989686, 45.84715332, 59.87721319])

# Calling the gradient descent function
est_weight, est_error = gradient_descent(X, Y, iter=2000)
print(f"The estimated weight is: {est_weight} and the estimated error is: {est_error}")

# Predicting values using the model
Y_pred = est_weight * X + est_error
# Plotting the regression line
plt.figure(figsize=(8, 6))
plt.scatter(X, Y, marker='o', color='red')
plt.plot([min(X), max(X)], [min(Y_pred), max(Y_pred)], color='green', linestyle='dashed')
plt.xlabel("X")
plt.ylabel("Y")
plt.title("Linear Regression Line")
plt.show()
Output:
At iteration 500: Cost = 112.64693787093867, Weight = 1.1988626933557767, Error = -0.08772375861141312
At iteration 1000: Cost = 52.88984191810187, Weight = 1.4787957693496838, Error = -0.1879671702437235
At iteration 1500: Cost = 51.67521875926768, Weight = 1.498470740959655, Error = -0.229142002191261
At iteration 2000: Cost = 51.637043038964446, Weight = 1.4999853253639915, Error = -0.24999779540108228
The estimated weight is: 1.4999853253639915 and the estimated error is: -0.24999779540108228
(plots: cost vs weight curve, and the fitted regression line over the data)

5.PERFORM AND PLOT OVERFITTING
CODE:
# Importing required libraries
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
import numpy as np
import matplotlib.pyplot as plt

# Generate synthetic data
np.random.seed(0)
n_samples = 30
X = np.sort(np.random.rand(n_samples))
y = np.sin(2 * np.pi * X) + np.random.randn(n_samples) * 0.1

# Define polynomial degrees to fit
degrees = [1, 4, 15]

# Plot polynomial regression models
plt.figure(figsize=(15, 5))
for i, degree in enumerate(degrees):
    ax = plt.subplot(1, len(degrees), i + 1)
    plt.setp(ax, xticks=(), yticks=())
    # Transform input features to include polynomial terms
    poly = PolynomialFeatures(degree=degree, include_bias=False)
    X_poly = poly.fit_transform(X.reshape(-1, 1))
    # Fit a linear regression model
    model = LinearRegression()
    model.fit(X_poly, y)
    # Plot data points and polynomial regression curve
    plt.scatter(X, y, s=20, label='Data')
    plt.plot(X, model.predict(X_poly), color='red', label='Model')
    plt.legend()
    # Add titles based on degree
    if degree == 1:
        plt.title(f'Degree {degree} (Underfitting)')
    elif degree == 4:
        plt.title(f'Degree {degree} (Balanced)')
    elif degree == 15:
        plt.title(f'Degree {degree} (Overfitting)')

plt.tight_layout()
plt.show()
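
Overfitting can also be quantified numerically: the training error keeps shrinking as the degree grows even though the high-degree model generalizes poorly. A short sketch reusing the variables defined above (an addition for reference, not part of the original record):

from sklearn.metrics import mean_squared_error

for degree in degrees:
    poly = PolynomialFeatures(degree=degree, include_bias=False)
    X_poly = poly.fit_transform(X.reshape(-1, 1))
    model = LinearRegression().fit(X_poly, y)
    # The degree-15 model drives training MSE toward zero
    print(f"Degree {degree}: training MSE = {mean_squared_error(y, model.predict(X_poly)):.4f}")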
OUTPUT:
(three panels: Degree 1 underfitting, Degree 4 balanced, Degree 15 overfitting)
6. IMPLEMENTATION OF KNN CLASSIFICATION ALGORITHM
CODE:
# Import necessary modules
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
import numpy as np
import matplotlib.pyplot as plt

irisData = load_iris()

# Create feature and target arrays
X = irisData.data
y = irisData.target

# Split into training and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

neighbors = np.arange(1, 9)
train_accuracy = np.empty(len(neighbors))
test_accuracy = np.empty(len(neighbors))

# Loop over K values
for i, k in enumerate(neighbors):
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X_train, y_train)
    # Compute training and test data accuracy
    train_accuracy[i] = knn.score(X_train, y_train)
    test_accuracy[i] = knn.score(X_test, y_test)

# Generate plot
plt.plot(neighbors, test_accuracy, label='Testing dataset Accuracy')
plt.plot(neighbors, train_accuracy, label='Training dataset Accuracy')
plt.legend()
plt.xlabel('n_neighbors')
plt.ylabel('Accuracy')
plt.show()
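
To pick k programmatically rather than reading it off the plot, the best-scoring value on the test set can be printed (a small addition, not part of the original record):

best_k = neighbors[np.argmax(test_accuracy)]
print("Best k on the test set:", best_k)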
Output:
(plot of training and testing accuracy against n_neighbors)
7. IMPLEMENTATION OF K-MEANS CLUSTERING
CODE:
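The code for this program is missing from the record; the following is a minimal sketch using scikit-learn's KMeans on a small synthetic dataset (an assumed reconstruction, not the original program):

# Import necessary modules
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
import matplotlib.pyplot as plt

# Generate a small synthetic 2-D dataset with 3 natural clusters (assumed data)
X, _ = make_blobs(n_samples=150, centers=3, random_state=42)

# Fit K-means with k = 3 and label every point with its nearest centroid
kmeans = KMeans(n_clusters=3, n_init=10, random_state=42)
labels = kmeans.fit_predict(X)

# Plot the clustered points and the learned cluster centres
plt.scatter(X[:, 0], X[:, 1], c=labels, s=20)
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1],
            marker='x', color='red', s=100, label='Centroids')
plt.legend()
plt.title("K-means Clustering (k = 3)")
plt.show()

The resulting plot shows the three clusters in distinct colors with the centroids marked by red crosses.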

