PROGRAM - 4
To get input from the user and perform numerical operations (MAX, MIN, AVG, SUM, SQRT, ROUND) in Python.
def get_user_input():
    # Collect numbers from the user until 'done' is entered.
    nums = []
    while True:
        num = input("Enter a number (or 'done' to finish): ")
        if num.lower() == 'done':
            break
        else:
            try:
                nums.append(float(num))
            except ValueError:
                print("Invalid input. Please enter a valid number.")
    return nums

def numerical_operations(nums):
    # Compute and print MAX, MIN, AVG, SUM, SQRT and ROUND for the entered numbers.
    if not nums:
        print("No numbers entered.")
        return
    maximum = max(nums)
    minimum = min(nums)
    average = sum(nums) / len(nums)
    total_sum = sum(nums)
    sqrt_values = [round(num ** 0.5, 2) for num in nums]
    rounded_values = [round(num) for num in nums]
    print("Maximum:", maximum)
    print("Minimum:", minimum)
    print("Average:", average)
    print("Sum:", total_sum)
    print("Square roots:", sqrt_values)
    print("Rounded values:", rounded_values)

def main():
    print("Enter numbers to perform numerical operations.")
    numbers = get_user_input()
    numerical_operations(numbers)

if __name__ == "__main__":
    main()
OUTPUT
PROGRAM - 5
To perform a dimensionality reduction operation using PCA for the Houses data set in Python.
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

def apply_pca(df, n_components=2):
    # apply_pca was not defined in the original listing; a minimal assumed version:
    # keep numeric columns, impute missing values, standardize, then project onto the principal components.
    numeric = df.select_dtypes(include='number').fillna(0)
    scaled = StandardScaler().fit_transform(numeric)
    pcs = PCA(n_components=n_components).fit_transform(scaled)
    return pd.DataFrame(pcs, columns=[f'PC{i+1}' for i in range(n_components)])

url = "https://raw.githubusercontent.com/dipanjanS/practical-machine-learning-with-python/master/notebooks/Ch05_Machine_Learning_Pipeline/AB_NYC_2019.csv"
data = pd.read_csv(url)
result = apply_pca(data)
print(result.head())
OUTPUT
PROGRAM - 6
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.metrics import accuracy_score, confusion_matrix

iris = datasets.load_iris()
X = iris.data
y = iris.target

# The split step was missing from the original listing; test_size=0.2 and random_state=42 are assumed (30 test samples, matching the output below).
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

model = LogisticRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)

print("Accuracy:", accuracy_score(y_test, y_pred))
print("Confusion Matrix:")
print(confusion_matrix(y_test, y_pred))
OUTPUT
Accuracy: 1.0
Confusion Matrix:
[[10 0 0]
[ 0 9 0]
[ 0 0 11]]
PROGRAM - 7
To perform data import/export (.CSV, .XLS, .TXT) operations using data frames in Python.
import pandas as pd
# Create a sample DataFrame.
data = {
    'Name': ['John', 'Anna', 'Peter', 'Linda'],
    'Age': [28, 35, 42, 32],
    'City': ['New York', 'Paris', 'Berlin', 'London']
}
df = pd.DataFrame(data)

# Export to CSV, Excel and tab-separated text files.
df.to_csv('sample_data.csv', index=False)
df.to_excel('sample_data.xlsx', index=False)
df.to_csv('sample_data.txt', sep='\t', index=False)

# Import the files back into DataFrames.
df_csv = pd.read_csv('sample_data.csv')
df_excel = pd.read_excel('sample_data.xlsx')
df_txt = pd.read_csv('sample_data.txt', sep='\t')
PROGRAM - 8
To perform a K-Means clustering operation and visualize the results for the iris data set in Python.
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
import warnings
warnings.filterwarnings('ignore')

# Load the iris data set (the seaborn copy keeps the species labels for reference).
iris = sns.load_dataset('iris')
print(iris.head())

# The clustering step was not shown in the original listing; 3 clusters are assumed.
features = iris.drop(columns='species')
kmeans = KMeans(n_clusters=3, n_init=10, random_state=42)
iris['cluster'] = kmeans.fit_predict(features)

# Visualize the clusters on two of the features.
sns.scatterplot(data=iris, x='sepal_length', y='sepal_width', hue='cluster', palette='deep')
plt.title('K-Means clusters on the iris data set')
plt.show()
PROGRAM - 9
Write an R/Python script to diagnose a disease using KNN classification and plot the results in Python.
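No code listing accompanies this aim in the record; the following is a minimal sketch, assuming scikit-learn's built-in breast cancer data set stands in for the disease data, an 80/20 train/test split, and k = 5 neighbours.

import matplotlib.pyplot as plt
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score, ConfusionMatrixDisplay

# Breast cancer data stands in for the "disease" data (assumption).
cancer = load_breast_cancer()
X_train, X_test, y_train, y_test = train_test_split(
    cancer.data, cancer.target, test_size=0.2, random_state=42)

# Fit a KNN classifier with an assumed k of 5 and predict on the held-out samples.
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
print("Accuracy:", accuracy_score(y_test, y_pred))

# Plot the confusion matrix of the diagnoses.
ConfusionMatrixDisplay.from_predictions(y_test, y_pred, display_labels=cancer.target_names)
plt.title('KNN diagnosis results')
plt.show()

The value of k and the 80/20 split are illustrative; any labelled medical data set of the same shape would work in their place.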
PROGRAM - 10
import pandas as pd
from mlxtend.frequent_patterns import apriori, association_rules

# Sample transaction data (assumed; the data definition was not shown in the original listing).
data = {'Items': [['milk', 'bread'], ['milk', 'bread', 'butter'], ['milk', 'bread'], ['milk', 'butter']]}
df = pd.DataFrame(data)
df['Items'] = df['Items'].apply(lambda x: ','.join(x))
one_hot_encoded = df['Items'].str.get_dummies(sep=',')
frequent_itemsets = apriori(one_hot_encoded.astype(bool), min_support=0.5, use_colnames=True)
rules = association_rules(frequent_itemsets, metric='confidence', min_threshold=0.7)
print("Frequent Itemsets:")
print(frequent_itemsets)
print("\nAssociation Rules:")
print(rules)
OUTPUT
PROGRAM - 11
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
# Sample data with missing values.
data = {
    'A': [1, 2, None, 4, 5],
    'B': [10, 20, 30, None, 50],
    'C': [100, 200, 300, 400, None]
}
df = pd.DataFrame(data)

# Fill missing values with the column means, then normalize each column to the [0, 1] range.
df.fillna(df.mean(), inplace=True)
scaler = MinMaxScaler()
df_normalized = pd.DataFrame(scaler.fit_transform(df), columns=df.columns)
print(df_normalized)  # display the normalized DataFrame