SOURCE CODE:
import pandas as pd
# 3. Handle missing or invalid values in numeric columns
# Convert numeric columns to proper types and replace invalid entries with NaN
numeric_columns = ['Duration', 'Pulse', 'Maxpulse', 'Calories']
for col in numeric_columns:
    df[col] = pd.to_numeric(df[col], errors='coerce')
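The Before/After Dropping captures below imply a drop step that the excerpt omits; a minimal sketch of that step, assuming the df and numeric_columns defined above:

# Count the NaNs introduced by the coercion above (illustrative check)
print(df[numeric_columns].isna().sum())

# Drop rows that still contain NaN in any numeric column
df = df.dropna(subset=numeric_columns).reset_index(drop=True)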
OUTPUT:
Before Dropping:
After Dropping:
SOURCE CODE:
import pandas as pd
import streamlit as st
from mlxtend.frequent_patterns import apriori, association_rules
# Transforming the data into a DataFrame suitable for the apriori function
df = pd.DataFrame(dataset)
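The excerpt ends at the DataFrame construction; a minimal sketch of the remaining Apriori steps, assuming df holds one-hot boolean item columns (the min_support and min_threshold values are illustrative, not the lab's exact settings):

# Mine frequent itemsets with Apriori (min_support is an assumed threshold)
frequent_itemsets = apriori(df, min_support=0.5, use_colnames=True)

# Derive association rules from the frequent itemsets
rules = association_rules(frequent_itemsets, metric="confidence", min_threshold=0.7)

# Show both tables in the Streamlit app imported above
st.write("Frequent Itemsets", frequent_itemsets)
st.write("Association Rules", rules)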
Lab 3: Finding Frequent Itemsets and Association Rules with FP Growth Algorithm
SOURCE CODE:
import pandas as pd
import streamlit as st
from mlxtend.frequent_patterns import fpgrowth, association_rules
from mlxtend.preprocessing import TransactionEncoder
print("\nAssociation Rules:")
print(rules)
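The steps between the imports and the final prints are missing from the excerpt; a minimal sketch of the FP-Growth pipeline, assuming dataset is a list of transaction lists (the thresholds are illustrative):

# One-hot encode the list of transactions
te = TransactionEncoder()
df = pd.DataFrame(te.fit(dataset).transform(dataset), columns=te.columns_)

# Mine frequent itemsets with FP-Growth (min_support is assumed)
frequent_itemsets = fpgrowth(df, min_support=0.5, use_colnames=True)

# Generate association rules from the itemsets
rules = association_rules(frequent_itemsets, metric="confidence", min_threshold=0.7)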
OUTPUT:
SOURCE CODE:
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from math import log2
from collections import Counter
from pprint import pprint
weighted_entropy = 0
for attr_val, subset in df_split:
    subset_entropy = entropy_of_list(subset[target_attribute], attr_val)
    weighted_entropy += (len(subset) / total_instances) * subset_entropy
return tree
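The excerpt shows only the weighted-entropy accumulation; for context, a minimal self-contained sketch of the full information-gain computation (the entropy and information_gain names are illustrative stand-ins, not the report's exact helpers):

from math import log2
from collections import Counter

def entropy(labels):
    # Shannon entropy of a list of class labels
    counts = Counter(labels)
    total = len(labels)
    return -sum((n / total) * log2(n / total) for n in counts.values())

def information_gain(df, split_attribute, target_attribute):
    # Entropy of the whole target column minus the split's weighted entropy
    total_entropy = entropy(df[target_attribute])
    total_instances = len(df)
    weighted_entropy = 0
    for _, subset in df.groupby(split_attribute):
        weighted_entropy += (len(subset) / total_instances) * entropy(subset[target_attribute])
    return total_entropy - weighted_entropy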
OUTPUT:
{
"Yes":4
}
Classes ➡ Yes / Yes
{
"No":3
"Yes":2
}
Classes ➡ Yes / No
{
"Yes":3
"No":2
}
Classes ➡ Yes / No
{
"No":5
"Yes":9
}
Classes ➡ Yes / No
{
"Yes":3
"No":1
}
Classes ➡ Yes / No
{
"No":2
"Yes":2
}
Classes ➡ Yes / No
{
"Yes":4
"No":2
}
Classes ➡ Yes / No
{
"No":5
"Yes":9
}
Classes ➡ Yes / No
{
"No":4
"Yes":3
}
Classes ➡ Yes / No
{
"Yes":6
"No":1
}
Classes ➡ Yes / No
{
"No":5
"Yes":9
}
Classes ➡ Yes / No
{
"No":2
"Yes":6
}
Classes ➡ Yes / No
{
"No":3
"Yes":3
}
Classes ➡ Yes / No
{
"No":5
"Yes":9
}
Classes ➡ Yes / No
{
"Yes":1
}
Classes ➡ Yes / Yes
{
"No":2
}
Classes ➡ No / No
{
"No":1
"Yes":1
}
Classes ➡ Yes / No
Probabilities of Class 'p' = Yes ➡ 0.5
{
"No":3
"Yes":2
}
Classes ➡ Yes / No
{
"No":3
}
Classes ➡ No / No
{
"Yes":2
}
Classes ➡ Yes / Yes
{
"No":3
"Yes":2
}
Classes ➡ Yes / No
{
"No":2
"Yes":1
}
Classes ➡ Yes / No
{
"No":1
"Yes":1
}
Classes ➡ Yes / No
{
"No":3
"Yes":2
}
Classes ➡ Yes / No
{
"Yes":1
"No":1
}
Classes ➡ Yes / No
{
"Yes":2
"No":1
}
Classes ➡ Yes / No
Probabilities of Class 'p' = Yes ➡ 0.6666666666666666
{
"Yes":3
"No":2
}
Classes ➡ Yes / No
{
"Yes":1
"No":1
}
Classes ➡ Yes / No
{
"Yes":2
"No":1
}
Classes ➡ Yes / No
{
"Yes":3
"No":2
}
Classes ➡ Yes / No
{
"Yes":3
}
Classes ➡ Yes / Yes
{
"No":2
}
Classes ➡ No / No
{
"Yes":3
"No":2
}
Classes ➡ Yes / No
SOURCE CODE:
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier, plot_tree
import matplotlib.pyplot as plt
import matplotlib
####################################################################################
# Splitting the data into training and testing sets (70% train, 30% test)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=101)
print("\nTraining and testing sets created successfully!")
OUTPUT:
(terminal output screenshot)
SOURCE CODE:
print("\nModel Evaluation:")
print(f"Accuracy: {accuracy * 100:.2f}%")
print(f"F1 Score: {f1:.2f}")
OUTPUT:
(terminal output screenshot)
SOURCE CODE:
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from sklearn.cluster import KMeans
plt.figure(figsize=(10, 8))
for i in range(len(data)):
    plt.scatter(
        data[i][0],
        data[i][1],
        color=colors[labels[i]],
        marker=markers[labels[i]],
        s=30,  # Size of each point
    )
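The data, labels, colors, and markers used in the plotting loop are not defined in the excerpt; a minimal sketch of the clustering step that would precede it (the toy data, k=3, and the style lists are illustrative assumptions):

# Toy 2-D points standing in for the lab's dataset (an assumption)
data = np.random.rand(100, 2)

# Cluster with an assumed k=3
kmeans = KMeans(n_clusters=3, n_init=10, random_state=42)
labels = kmeans.fit_predict(data)

# One color/marker per cluster, indexed by label in the loop above
colors = ["red", "green", "blue"]
markers = ["o", "s", "^"]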
OUTPUT:
(terminal output screenshot)