ml lab

The document contains various Python programs demonstrating different algorithms and functionalities, including Locally Weighted Regression, SVM, KNN, and others. It also includes examples for generating random numbers, checking leap years, calculating factorials, and implementing a simple calculator. Additionally, it covers machine learning concepts like Random Forest and Perceptron algorithms, along with practical applications such as generating Fibonacci sequences and displaying calendars.

1. Implement the non-parametric Locally Weighted Regression algorithm to fit data points. Select an appropriate data set for your experiment and draw graphs.

Program:
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np

def kernel(point, xmat, k):
    # Build a diagonal weight matrix: points close to 'point' get higher weight
    m, n = np.shape(xmat)
    weights = np.mat(np.eye(m))
    for j in range(m):
        diff = point - xmat[j]
        weights[j, j] = np.exp(diff * diff.T / (-2.0 * k**2))
    return weights

def localWeight(point, xmat, ymat, k):
    # Solve the weighted least-squares problem for this query point
    wei = kernel(point, xmat, k)
    W = (xmat.T * (wei * xmat)).I * (xmat.T * (wei * ymat.T))
    return W

def localWeightRegression(xmat, ymat, k):
    m, n = np.shape(xmat)
    ypred = np.zeros(m)
    for i in range(m):
        ypred[i] = xmat[i] * localWeight(xmat[i], xmat, ymat, k)
    return ypred

# load data points
data = pd.read_csv('10-dataset.csv')
bill = np.array(data.total_bill)
tip = np.array(data.tip)

# prepare the design matrix: prepend a column of ones to the bill values
mbill = np.mat(bill)
mtip = np.mat(tip)
m = np.shape(mbill)[1]
one = np.mat(np.ones(m))
X = np.hstack((one.T, mbill.T))

# set the bandwidth k here
ypred = localWeightRegression(X, mtip, 0.5)
SortIndex = X[:, 1].argsort(0)
xsort = X[SortIndex][:, 0]

fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(bill, tip, color='green')
ax.plot(xsort[:, 1], ypred[SortIndex], color='red', linewidth=5)
plt.xlabel('Total bill')
plt.ylabel('Tip')
plt.show()
2. Write a Python Program to Generate a Random Number.

import random
n = random.randint(0,50)
print(n)

Output:
40

import random
n = random.randint(100, 200)
print(n)

Output:
143

3. Generate Python code for the SVM algorithm.

# Load the necessary Python libraries


import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score

data = pd.read_csv('/content/Breast_cancer_data.csv')
data.head()

data.isna().sum()

data.describe()

data.info()

corr = data.corr()
fig = plt.figure(figsize=(15,12))
a = sns.heatmap(corr, cmap='Oranges')
a.set_title("Data Correlation")

y = data["diagnosis"].values
X = data.drop(["diagnosis"], axis=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)

svc_diag = SVC(C=10, kernel='linear')
svc_diag.fit(X_train, y_train)
predicted = svc_diag.predict(X_test)
acc_svc = accuracy_score(y_test, predicted)
print('Accuracy Score of Linear Model: ', acc_svc)

svc_diag = SVC(C=10, kernel='rbf', gamma=2)
svc_diag.fit(X_train, y_train)
predicted = svc_diag.predict(X_test)
acc_svc = accuracy_score(y_test, predicted)
print('Accuracy Score of Gaussian Model: ', acc_svc)

Output:

4. Python code to check for a leap year.


# Function implementing the conditions to check for a leap year
def CheckLeap(Year):
    # A year is a leap year if it is divisible by 400,
    # or divisible by 4 but not by 100
    if (Year % 400 == 0) or ((Year % 100 != 0) and (Year % 4 == 0)):
        print("Given year is a leap year")
    else:
        print("Given year is not a leap year")

# Taking an input year from the user
Year = int(input("Enter the number: "))
# Printing the result
CheckLeap(Year)

Output:

Enter the number: 1700
Given year is not a leap year

5. Implement the KNN algorithm to classify a data set using Python.


Refer Ex. No. 8; since that exercise is not reproduced here, a minimal KNN sketch is given below.
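The following is only a minimal sketch of KNN classification with scikit-learn; the choice of the Iris dataset and k = 5 are assumptions for illustration, not part of the referenced exercise.

# Minimal KNN classification sketch (dataset and k are assumptions)
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score

# Load a sample dataset and split it into train and test sets
X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)

# Fit a k-nearest-neighbours classifier (k = 5) and evaluate it
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
predicted = knn.predict(X_test)
print('KNN Accuracy Score: ', accuracy_score(y_test, predicted))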

6. Python program to find the factorial of a number.

# Python program to find the factorial of a number provided by the user.

# change the value for a different result
num = 7

# To take input from the user
# num = int(input("Enter a number: "))

factorial = 1
# check if the number is negative, positive or zero
if num < 0:
    print("Sorry, factorial does not exist for negative numbers")
elif num == 0:
    print("The factorial of 0 is 1")
else:
    for i in range(1, num + 1):
        factorial = factorial * i
    print("The factorial of", num, "is", factorial)

Output:
The factorial of 7 is 5040

7. Generate Python code for the K-means algorithm.

Refer Ex. No. 7; since that exercise is not reproduced here, a minimal K-means sketch is given below.
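The following is only a minimal sketch of K-means clustering with scikit-learn; the synthetic data and k = 3 are assumptions for illustration, not part of the referenced exercise.

# Minimal K-means sketch on synthetic data (dataset and k are assumptions)
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs

# Generate synthetic 2-D data with three clusters
X, _ = make_blobs(n_samples=300, centers=3, random_state=42)

# Fit K-means with k = 3 and get the cluster assignments
kmeans = KMeans(n_clusters=3, n_init=10, random_state=42)
labels = kmeans.fit_predict(X)

# Plot the points coloured by cluster, with the centroids marked
plt.scatter(X[:, 0], X[:, 1], c=labels, s=10)
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1],
            color='red', marker='x', s=100)
plt.show()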

8. Python program to display the Fibonacci sequence

# Program to display the Fibonacci sequence up to the n-th term

nterms = int(input("How many terms? "))

# first two terms
n1, n2 = 0, 1
count = 0

# check if the number of terms is valid
if nterms <= 0:
    print("Please enter a positive integer")
# if there is only one term, return n1
elif nterms == 1:
    print("Fibonacci sequence upto", nterms, ":")
    print(n1)
# generate the fibonacci sequence
else:
    print("Fibonacci sequence:")
    while count < nterms:
        print(n1)
        nth = n1 + n2
        # update values
        n1 = n2
        n2 = nth
        count += 1

Output

How many terms? 7


Fibonacci sequence:
0
1
1
2
3
5
8
9. Maximum Margin Classifier Program

from sklearn.svm import LinearSVC
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split

def load_data():
    # Generate some synthetic data
    X, y = make_classification(n_samples=1000, n_features=4, random_state=42)
    # Split the data into training and test sets
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42)
    return X_train, X_test, y_train, y_test

X_train, X_test, y_train, y_test = load_data()

# Create the model
model = LinearSVC(random_state=42)
# Fit the model to the training data
model.fit(X_train, y_train)

# Evaluate the model on the test data
accuracy = model.score(X_test, y_test)
print("Test accuracy: {:.2f}".format(accuracy))

10. Develop a Python program to check for an Armstrong number

num = int(input("Enter a number: "))

sum = 0
n1 = len(str(num))
temp = num
# Sum each digit raised to the power of the number of digits
while temp > 0:
    digit = temp % 10
    sum += digit ** n1
    temp //= 10
if num == sum:
    print(num, "is an Armstrong number")
else:
    print(num, "is not an Armstrong number")

Output
371 is an Armstrong number
11. Creating and Visualizing a Random Forest Classification Model in Machine Learning Using Python

Problem Statement: Use Machine Learning to predict cases of breast cancer using
patient treatment history and health data

Dataset: Breast Cancer Wisconsin (Diagnostic) Dataset


Classification Model Building: Random Forest in Python

Let us build the classification model with the help of a random forest algorithm; a sketch implementing the steps is given after the list.

Step 1: Load Pandas library and the dataset using Pandas


Step 2: Define the features and the target
Step 3: Split the dataset into train and test sets using sklearn

Step 4: Import the random forest classifier function from the sklearn ensemble module and build the random forest classifier model with it

Step 5: Predict values using the random forest classifier model

Step 6: Evaluate the random forest classifier model
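A minimal sketch following Steps 1-6 is given below. Loading the dataset through scikit-learn's built-in copy of the Breast Cancer Wisconsin data (rather than a CSV file) is an assumption made so the sketch runs as-is; substitute the actual dataset path if one is provided.

# Minimal sketch of Steps 1-6; the built-in dataset is an assumption
import pandas as pd
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, confusion_matrix

# Step 1: Load the dataset into a Pandas DataFrame
cancer = load_breast_cancer()
data = pd.DataFrame(cancer.data, columns=cancer.feature_names)
data['diagnosis'] = cancer.target

# Step 2: Define the features and the target
X = data.drop(['diagnosis'], axis=1)
y = data['diagnosis']

# Step 3: Split the dataset into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)

# Step 4: Build the random forest classifier model
model = RandomForestClassifier(n_estimators=100, random_state=1)
model.fit(X_train, y_train)

# Step 5: Predict values using the random forest classifier model
predicted = model.predict(X_test)

# Step 6: Evaluate the random forest classifier model
print('Accuracy Score: ', accuracy_score(y_test, predicted))
print('Confusion Matrix:\n', confusion_matrix(y_test, predicted))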


12. Simple Calculator using Python

# This function adds two numbers
def add(x, y):
    return x + y

# This function subtracts two numbers
def subtract(x, y):
    return x - y

# This function multiplies two numbers
def multiply(x, y):
    return x * y

# This function divides two numbers
def divide(x, y):
    return x / y

print("Select operation.")
print("1.Add")
print("2.Subtract")
print("3.Multiply")
print("4.Divide")

while True:
    # take input from the user
    choice = input("Enter choice(1/2/3/4): ")
    # check if choice is one of the four options
    if choice in ('1', '2', '3', '4'):
        try:
            num1 = float(input("Enter first number: "))
            num2 = float(input("Enter second number: "))
        except ValueError:
            print("Invalid input. Please enter a number.")
            continue
        if choice == '1':
            print(num1, "+", num2, "=", add(num1, num2))
        elif choice == '2':
            print(num1, "-", num2, "=", subtract(num1, num2))
        elif choice == '3':
            print(num1, "*", num2, "=", multiply(num1, num2))
        elif choice == '4':
            print(num1, "/", num2, "=", divide(num1, num2))
        # check if the user wants another calculation
        # break the while loop if the answer is no
        next_calculation = input("Let's do next calculation? (yes/no): ")
        if next_calculation == "no":
            break
    else:
        print("Invalid Input")

Output:
Select operation.
1.Add
2.Subtract
3.Multiply
4.Divide
Enter choice(1/2/3/4): 3
Enter first number: 15
Enter second number: 14
15.0 * 14.0 = 210.0
Let's do next calculation? (yes/no): no

13. Perceptron Algorithm using Python

# Perceptron Algorithm on the Sonar Dataset
from random import seed
from random import randrange
from csv import reader

# Load a CSV file
def load_csv(filename):
    dataset = list()
    with open(filename, 'r') as file:
        csv_reader = reader(file)
        for row in csv_reader:
            if not row:
                continue
            dataset.append(row)
    return dataset

# Convert string column to float
def str_column_to_float(dataset, column):
    for row in dataset:
        row[column] = float(row[column].strip())

# Convert string column to integer
def str_column_to_int(dataset, column):
    class_values = [row[column] for row in dataset]
    unique = set(class_values)
    lookup = dict()
    for i, value in enumerate(unique):
        lookup[value] = i
    for row in dataset:
        row[column] = lookup[row[column]]
    return lookup

# Split a dataset into k folds
def cross_validation_split(dataset, n_folds):
    dataset_split = list()
    dataset_copy = list(dataset)
    fold_size = int(len(dataset) / n_folds)
    for i in range(n_folds):
        fold = list()
        while len(fold) < fold_size:
            index = randrange(len(dataset_copy))
            fold.append(dataset_copy.pop(index))
        dataset_split.append(fold)
    return dataset_split

# Calculate accuracy percentage
def accuracy_metric(actual, predicted):
    correct = 0
    for i in range(len(actual)):
        if actual[i] == predicted[i]:
            correct += 1
    return correct / float(len(actual)) * 100.0

# Evaluate an algorithm using a cross validation split
def evaluate_algorithm(dataset, algorithm, n_folds, *args):
    folds = cross_validation_split(dataset, n_folds)
    scores = list()
    for fold in folds:
        train_set = list(folds)
        train_set.remove(fold)
        train_set = sum(train_set, [])
        test_set = list()
        for row in fold:
            row_copy = list(row)
            test_set.append(row_copy)
            row_copy[-1] = None
        predicted = algorithm(train_set, test_set, *args)
        actual = [row[-1] for row in fold]
        accuracy = accuracy_metric(actual, predicted)
        scores.append(accuracy)
    return scores

# Make a prediction with weights
def predict(row, weights):
    activation = weights[0]
    for i in range(len(row) - 1):
        activation += weights[i + 1] * row[i]
    return 1.0 if activation >= 0.0 else 0.0

# Estimate Perceptron weights using stochastic gradient descent
def train_weights(train, l_rate, n_epoch):
    weights = [0.0 for i in range(len(train[0]))]
    for epoch in range(n_epoch):
        for row in train:
            prediction = predict(row, weights)
            error = row[-1] - prediction
            weights[0] = weights[0] + l_rate * error
            for i in range(len(row) - 1):
                weights[i + 1] = weights[i + 1] + l_rate * error * row[i]
    return weights

# Perceptron Algorithm With Stochastic Gradient Descent
def perceptron(train, test, l_rate, n_epoch):
    predictions = list()
    weights = train_weights(train, l_rate, n_epoch)
    for row in test:
        prediction = predict(row, weights)
        predictions.append(prediction)
    return predictions

# Test the Perceptron algorithm on the sonar dataset
seed(1)
# load and prepare data
filename = 'sonar.all-data.csv'
dataset = load_csv(filename)
for i in range(len(dataset[0]) - 1):
    str_column_to_float(dataset, i)
# convert string class to integers
str_column_to_int(dataset, len(dataset[0]) - 1)
# evaluate algorithm
n_folds = 3
l_rate = 0.01
n_epoch = 500
scores = evaluate_algorithm(dataset, perceptron, n_folds, l_rate, n_epoch)
print('Scores: %s' % scores)
print('Mean Accuracy: %.3f%%' % (sum(scores) / float(len(scores))))

14. Calendar

First import the calendar module, then apply the syntax calendar.month(yy, mm).

See this example:

import calendar
# Enter the month and year
yy = int(input("Enter year: "))
mm = int(input("Enter month: "))

# display the calendar
print(calendar.month(yy, mm))
Output:
15. Gradient Descent
# Importing Libraries
import numpy as np
import matplotlib.pyplot as plt

def mean_squared_error(y_true, y_predicted):
    # Calculating the loss or cost
    cost = np.sum((y_true - y_predicted)**2) / len(y_true)
    return cost

# Gradient Descent Function
# Here iterations, learning_rate and stopping_threshold
# are hyperparameters that can be tuned
def gradient_descent(x, y, iterations=1000, learning_rate=0.0001,
                     stopping_threshold=1e-6):
    # Initializing weight and bias
    current_weight = 0.1
    current_bias = 0.01
    n = float(len(x))
    costs = []
    weights = []
    previous_cost = None

    # Estimation of optimal parameters
    for i in range(iterations):
        # Making predictions
        y_predicted = (current_weight * x) + current_bias

        # Calculating the current cost
        current_cost = mean_squared_error(y, y_predicted)

        # If the change in cost is less than or equal to
        # stopping_threshold, we stop the gradient descent
        if previous_cost and abs(previous_cost - current_cost) <= stopping_threshold:
            break
        previous_cost = current_cost
        costs.append(current_cost)
        weights.append(current_weight)

        # Calculating the gradients
        weight_derivative = -(2/n) * sum(x * (y - y_predicted))
        bias_derivative = -(2/n) * sum(y - y_predicted)

        # Updating weights and bias
        current_weight = current_weight - (learning_rate * weight_derivative)
        current_bias = current_bias - (learning_rate * bias_derivative)

        # Printing the parameters for each iteration
        print(f"Iteration {i+1}: Cost {current_cost}, Weight "
              f"{current_weight}, Bias {current_bias}")

    # Visualizing the weights and cost for all iterations
    plt.figure(figsize=(8, 6))
    plt.plot(weights, costs)
    plt.scatter(weights, costs, marker='o', color='red')
    plt.title("Cost vs Weights")
    plt.ylabel("Cost")
    plt.xlabel("Weight")
    plt.show()

    return current_weight, current_bias

def main():
    # Data
    X = np.array([32.50234527, 53.42680403, 61.53035803, 47.47563963, 59.81320787,
                  55.14218841, 52.21179669, 39.29956669, 48.10504169, 52.55001444,
                  45.41973014, 54.35163488, 44.1640495 , 58.16847072, 56.72720806,
                  48.95588857, 44.68719623, 60.29732685, 45.61864377, 38.81681754])
    Y = np.array([31.70700585, 68.77759598, 62.5623823 , 71.54663223, 87.23092513,
                  78.21151827, 79.64197305, 59.17148932, 75.3312423 , 71.30087989,
                  55.16567715, 82.47884676, 62.00892325, 75.39287043, 81.43619216,
                  60.72360244, 82.89250373, 97.37989686, 48.84715332, 56.87721319])

    # Estimating weight and bias using gradient descent
    estimated_weight, estimated_bias = gradient_descent(X, Y, iterations=2000)
    print(f"Estimated Weight: {estimated_weight}\nEstimated Bias: {estimated_bias}")

    # Making predictions using the estimated parameters
    Y_pred = estimated_weight * X + estimated_bias

    # Plotting the regression line
    plt.figure(figsize=(8, 6))
    plt.scatter(X, Y, marker='o', color='red')
    plt.plot([min(X), max(X)], [min(Y_pred), max(Y_pred)], color='blue',
             markerfacecolor='red', markersize=10, linestyle='dashed')
    plt.xlabel("X")
    plt.ylabel("Y")
    plt.show()

if __name__ == "__main__":
    main()

Output:

Iteration 1: Cost 4352.088931274409, Weight 0.7593291142562117, Bias 0.02288558130709
Iteration 2: Cost 1114.8561474350017, Weight 1.081602958862324, Bias 0.02918014748569513
Iteration 3: Cost 341.42912086804455, Weight 1.2391274084945083, Bias 0.03225308846928192
Iteration 4: Cost 156.64495290904443, Weight 1.3161239281746984, Bias 0.03375132986012604
Iteration 5: Cost 112.49704004742098, Weight 1.3537591652024805, Bias 0.034479873154934775
Iteration 6: Cost 101.9493925395456, Weight 1.3721549833978113, Bias 0.034832195392868505
Iteration 7: Cost 99.4293893333546, Weight 1.3811467575154601, Bias 0.03500062439068245
Iteration 8: Cost 98.82731958262897, Weight 1.3855419247507244, Bias 0.03507916814736111
Iteration 9: Cost 98.68347500997261, Weight 1.3876903144657764, Bias 0.035113776874486774
Iteration 10: Cost 98.64910780902792, Weight 1.3887405007983562, Bias 0.035126910596389935
Iteration 11: Cost 98.64089651459352, Weight 1.389253895811451, Bias 0.03512954755833985
Iteration 12: Cost 98.63893428729509, Weight 1.38950491235671, Bias 0.035127053821718185
Iteration 13: Cost 98.63846506273883, Weight 1.3896276808137857, Bias 0.035122052266051224
Iteration 14: Cost 98.63835254057648, Weight 1.38968776283053, Bias 0.03511582492978764
Iteration 15: Cost 98.63832524036214, Weight 1.3897172043139192, Bias 0.03510899846107016
Iteration 16: Cost 98.63831830104695, Weight 1.389731668997059, Bias 0.035101879159522745
Iteration 17: Cost 98.63831622628217, Weight 1.389738813163012, Bias 0.03509461674147458
Estimated Weight: 1.389738813163012
Estimated Bias: 0.03509461674147458
16. Sum of Natural Numbers
# Python program to find the sum of natural numbers using a recursive function
def recur_sum(n):
    if n <= 1:
        return n
    else:
        return n + recur_sum(n - 1)

# change this value for a different result
num = 16

if num < 0:
    print("Enter a positive number")
else:
    print("The sum is", recur_sum(num))

Output:
The sum is 136

17. Simple Linear Regression in Python

Consider ‘lstat’ as the independent and ‘medv’ as the dependent variable. A sketch implementing the steps is given after the list.

Step 1: Load the Boston dataset

Step 2: Have a glance at the shape

Step 3: Have a glance at the dependent and independent variables


Step 4: Visualize the change in the variables

Step 5: Divide the data into independent and dependent variables

Step 6: Split the data into train and test sets

Step 7: Shape of the train and test sets


Step 8: Train the algorithm

Step 9: Retrieve the intercept

Step 10: Retrieve the slope

Step 11: Predicted value


Step 12: Actual value

Step 13: Evaluate the algorithm
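The manual lists only the steps; a minimal sketch following them is given below. The Boston housing dataset was removed from recent scikit-learn releases, so the sketch assumes a local file boston.csv containing 'lstat' and 'medv' columns; substitute your own copy of the data.

# Minimal sketch of Steps 1-13; the file name boston.csv is an assumption
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics

# Steps 1-2: load the dataset and glance at its shape
data = pd.read_csv('boston.csv')  # assumed local copy of the Boston data
print(data.shape)

# Steps 3-4: glance at the variables and visualize their relationship
plt.scatter(data['lstat'], data['medv'])
plt.xlabel('lstat')
plt.ylabel('medv')
plt.show()

# Step 5: divide the data into independent and dependent variables
X = data[['lstat']]
y = data['medv']

# Steps 6-7: split into train and test sets and check their shapes
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
print(X_train.shape, X_test.shape)

# Step 8: train the algorithm
model = LinearRegression()
model.fit(X_train, y_train)

# Steps 9-10: retrieve the intercept and the slope
print('Intercept:', model.intercept_)
print('Slope:', model.coef_)

# Steps 11-12: compare predicted and actual values
y_pred = model.predict(X_test)
print(pd.DataFrame({'Actual': y_test, 'Predicted': y_pred}).head())

# Step 13: evaluate the algorithm
print('MAE:', metrics.mean_absolute_error(y_test, y_pred))
print('MSE:', metrics.mean_squared_error(y_test, y_pred))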

18. Program to add two matrices

# Program to add two matrices using nested loops
A = []
n = int(input("Enter N for N x N matrix : "))  # 3 here
# use a list for storing the 2D array
# get the user input and store it in the list
print("Enter the element ::>")
for i in range(n):
    row = []  # temporary list to store the row
    for j in range(n):
        row.append(int(input()))  # add the input to the row list
    A.append(row)  # add the row to the list
print(A)
# Display the 2D array
print("Display Array In Matrix Form")
for i in range(n):
    for j in range(n):
        print(A[i][j], end=" ")
    print()  # new line

B = []
n = int(input("Enter N for N x N matrix : "))  # 3 here
print("Enter the element ::>")
for i in range(n):
    row = []  # temporary list to store the row
    for j in range(n):
        row.append(int(input()))  # add the input to the row list
    B.append(row)  # add the row to the list
print(B)
# Display the 2D array
print("Display Array In Matrix Form")
for i in range(n):
    for j in range(n):
        print(B[i][j], end=" ")
    print()  # new line

# build an n x n zero matrix so the code works for any n, not just 3
result = [[0] * n for _ in range(n)]
# iterate through rows
for i in range(n):
    # iterate through columns
    for j in range(n):
        result[i][j] = A[i][j] + B[i][j]
print("Resultant Matrix is ::>")
for r in result:
    print(r)

Output
Enter N for N x N matrix : 3
Enter the element ::>
10
10
10
20
20
20
30
30
30
[[10, 10, 10], [20, 20, 20], [30, 30, 30]]
Display Array In Matrix Form
10 10 10
20 20 20
30 30 30
Enter N for N x N matrix : 3
Enter the element ::>
100
100
100
200
200
200
300
300
300
[[100, 100, 100], [200, 200, 200], [300, 300, 300]]
Display Array In Matrix Form
100 100 100
200 200 200
300 300 300
Resultant Matrix is ::>
[110, 110, 110]
[220, 220, 220]
[330, 330, 330]
19. Write Python code for the multiple linear regression algorithm.

import numpy as np
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt

def generate_dataset(n):
    # Build y = random_x1 * x1 + random_x2 * x2 + 1 with noisy x2;
    # each row of x is [1, x1, x2] so it carries an intercept column
    x = []
    y = []
    random_x1 = np.random.rand()
    random_x2 = np.random.rand()
    for i in range(n):
        x1 = i
        x2 = i / 2 + np.random.rand() * n
        x.append([1, x1, x2])
        y.append(random_x1 * x1 + random_x2 * x2 + 1)
    return np.array(x), np.array(y)

x, y = generate_dataset(200)

mpl.rcParams['legend.fontsize'] = 12
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.scatter(x[:, 1], x[:, 2], y, label='y', s=5)
ax.legend()
ax.view_init(45, 0)
plt.show()

Output:
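The program above only generates and visualizes the data; it does not estimate the regression coefficients themselves. As an addition not in the original program, the coefficients of y = b0 + b1*x1 + b2*x2 could be recovered with NumPy's least-squares solver, since x already carries a leading column of ones:

# Estimate b0, b1, b2 by least squares; x and y come from the program above
coeffs, residuals, rank, sv = np.linalg.lstsq(x, y, rcond=None)
print("Estimated coefficients:", coeffs)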

20. Design a Python Program to Sort Words in Alphabetic Order


my_str = input("Enter a string: ")
# break the string down into a list of words
words = my_str.split()
# sort the list
words.sort()
# display the sorted words
for word in words:
    print(word)
