# AAM PR QB
# AAM PR QB (duplicated question-bank header; commented out so the file parses as Python)
# Build a DataFrame from the iris feature matrix and attach the target labels.
df = pd.DataFrame(iris.data)
df['species'] = iris.target
# Structural overview (dtypes, non-null counts). Note df.info() prints its
# report and returns None, so the outer print also emits "None" — kept to
# match the original output exactly.
print("\n Dataset Information:")
print(df.info())
# Numeric summary of every column.
print("\n Statistical Summary:")
summary = df.describe()
print(summary)
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Inspect the frame: structure first, then summary statistics over all
# columns (include='all' covers non-numeric columns too).
print("\n🔹 Dataset Info:")
print(df.info())
print("\n🔹 Statistical Summary:")
stats = df.describe(include='all')
print(stats)
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# Summary statistics — describe() must be *called*; the original printed the
# bound method object (`<bound method ...>`) instead of its result.
print("\n Summary Statistics:")
print(df.describe())
# Missing values per column.
print("\n Missing Values in Each Column:")
print(df.isnull().sum())
# Survival counts ('2urvived' is the literal, misspelled column name used by
# this Titanic CSV; renamed later in the script).
print("\n Survival Counts:")
print(df['2urvived'].value_counts())
import pandas as pd
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.feature_selection import SelectFromModel
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.feature_selection import SelectFromModel
# Preview the first five rows of the loaded dataset.
first_rows = data.head()
print(first_rows)
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import GaussianNB
# Parenthesized multi-name import: the original broke the line after the
# trailing comma with no continuation, which is a SyntaxError.
from sklearn.metrics import (
    accuracy_score,
    confusion_matrix,
    classification_report,
)
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.datasets import load_iris
# Parenthesized multi-name import: the original broke the line after the
# trailing comma with no continuation, which is a SyntaxError.
from sklearn.metrics import (
    accuracy_score,
    confusion_matrix,
    classification_report,
)
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import accuracy_score, confusion_matrix
# Label-encode every column, then split features from the target.
# The original paste lost the loop body's indentation (a SyntaxError); the
# structure is restored here.
label_encoder = LabelEncoder()
# Apply LabelEncoder to each column (fit_transform refits per column, so the
# same encoder instance can be reused).
for column in df.columns:
    df[column] = label_encoder.fit_transform(df[column])
# Step 3: Split the data into features (X) and target (y)
X = df.drop(columns=['class'])  # Features (all columns except 'class')
y = df['class']  # Target (the 'class' column contains poisonous or edible)
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from os import rename
# Summary statistics — describe() must be *called*; the original printed the
# bound method object instead of its result.
print(df.describe())
# Drop rows with missing values. The bare `df.dropna` in the original was a
# no-op (the method was never invoked); inplace=True matches the mutation
# style of the rename below.
df.dropna(inplace=True)
# Normalize the CSV's misspelled '2urvived' column name.
df.rename(columns={'2urvived': 'survived'}, inplace=True)
# Make predictions
y_pred = model.predict(X_test)
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.neighbors import KNeighborsRegressor
from sklearn.metrics import mean_absolute_error
import matplotlib.pyplot as plt
import numpy as np
# Sigmoid activation and its derivative
def sigmoid(x):
    """Logistic sigmoid: map any real (or array) x into (0, 1).

    The original paste lost the body's indentation (a SyntaxError).
    """
    return 1 / (1 + np.exp(-x))
def sigmoid_derivative(x):
    """Derivative of the sigmoid, given x = sigmoid(z) (an already-activated value).

    d/dz sigmoid(z) = sigmoid(z) * (1 - sigmoid(z)), hence x * (1 - x).
    The original paste lost the body's indentation (a SyntaxError).
    """
    return x * (1 - x)
# Randomly initialise the network parameters with uniform draws in [0, 1).
# The four calls stay in the original order so the RNG stream is unchanged.
uniform = np.random.uniform
wh = uniform(size=(input_layer_neurons, hidden_layer_neurons))  # input->hidden weights, 2x2
bh = uniform(size=(1, hidden_layer_neurons))                    # hidden biases, 1x2
wo = uniform(size=(hidden_layer_neurons, output_neurons))       # hidden->output weights, 2x1
bo = uniform(size=(1, output_neurons))                          # output bias, 1x1
# Training loop: backpropagate the output error through the hidden layer.
# The original paste lost the loop body's indentation (a SyntaxError);
# restored here. NOTE(review): this fragment is incomplete — d_output,
# hidden_output and the forward pass are defined elsewhere in the full script.
for epoch in range(10000):
    error_hidden = d_output.dot(wo.T)
    d_hidden = error_hidden * sigmoid_derivative(hidden_output)
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
# Read the Spotify 2023 dataset (Latin-1 encoded) and preview the first rows.
data = pd.read_csv("spotify-2023.csv", encoding='ISO-8859-1')
preview = data.head()
print(preview)
# Cluster the scaled features into three groups.
# NOTE(review): data_scaled and pca_data are produced elsewhere in the full
# script (a scaling/PCA step not visible in this fragment).
kmeans = KMeans(n_clusters=3)
clusters = kmeans.fit_predict(data_scaled)
# Visualise the clusters in the 2-D PCA projection.
plt.title("Song Clusters")
plt.xlabel("PCA 1")
plt.ylabel("PCA 2")
plt.scatter(pca_data[:, 0], pca_data[:, 1], c=clusters)
plt.show()
# Step 1: Data
# Toy observations for a hand-computed Naive Bayes example.
weather = ['Rainy', 'Sunny', 'Overcast', 'Overcast', 'Sunny', 'Rainy']
# NOTE(review): despite its name, `temp` holds Yes/No class labels (e.g.
# "play?"), not temperatures — confirm against the original exercise text.
temp = ['Yes', 'Yes', 'Yes', 'Yes', 'No', 'Yes']
# Prior probabilities
# total, total_yes and total_no are computed elsewhere in the full script.
p_yes = total_yes / total
p_no = total_no / total
# Likelihoods
# P(Weather | Temperature)
p_sunny_given_yes = 1 / total_yes # 1 Sunny with Yes
p_sunny_given_no = 1 / total_no # 1 Sunny with No