# ML File — collection of machine-learning lab program fragments:
# Find-S, candidate elimination, ID3, backprop ANN, naive Bayes,
# Bayesian network, EM vs k-means, k-NN, locally weighted regression.
import csv


def find_s(data):
    """Find-S: return the most specific hypothesis consistent with the
    positive training examples.

    Each row of *data* is assumed to be [label, attr1, attr2, ...] with
    label 'Yes' marking a positive example (NOTE(review): label position
    inferred from the original `data[0][1:]` seed — confirm against the
    training CSV).
    """
    # Seed the hypothesis with the attributes of the first example,
    # which Find-S assumes to be positive.
    most_specific = list(data[0][1:])
    for row in data[1:]:
        if row[0] != 'Yes':
            continue  # Find-S ignores negative examples
        current = row[1:]
        # Generalize every attribute that disagrees with this example.
        most_specific = [
            h if h == a else '?' for h, a in zip(most_specific, current)
        ]
    return most_specific


if __name__ == '__main__':
    # NOTE(review): the original read an already-open `file` object that
    # was lost in extraction; the filename here is a guess — confirm.
    with open('training_data.csv', newline='') as file:
        reader = csv.reader(file)
        data = list(reader)
    print(find_s(data))
import csv


def find_s(data):
    """Find-S half of candidate elimination: the most specific hypothesis
    consistent with the positive examples.

    Rows are [label, attr1, ...]; label 'Yes' marks positives
    (NOTE(review): label position inferred from `data[0][1:]` — confirm).
    """
    most_specific = list(data[0][1:])
    for row in data[1:]:
        if row[0] != 'Yes':
            continue  # negatives never change the S boundary
        most_specific = [
            h if h == a else '?' for h, a in zip(most_specific, row[1:])
        ]
    return most_specific


def find_g(data):
    """Simplified G-boundary of candidate elimination.

    Starts from the all-'?' hypothesis and specializes one attribute per
    candidate against each negative example, guided by the Find-S
    hypothesis. Returns the list of surviving general hypotheses.
    """
    s = find_s(data)
    n = len(s)
    # One candidate specialization per attribute position.
    general = [['?'] * n for _ in range(n)]
    for row in data:
        if row[0] == 'Yes':
            continue  # only negative examples specialize G
        for i in range(n):
            if s[i] != '?' and s[i] != row[1:][i]:
                general[i][i] = s[i]
    # Drop candidates that were never specialized.
    return [g for g in general if any(a != '?' for a in g)]


if __name__ == '__main__':
    # NOTE(review): the original read an already-open `file`; the
    # filename used here is a guess — confirm.
    with open('training_data.csv', newline='') as file:
        reader = csv.reader(file)
        data = list(reader)
    s, g = find_s(data), find_g(data)
    print('S:', s)
    print('G:', g)
import math


def _class_counts(data, target_attr):
    """Frequency of each class label in *data* (rows are dicts keyed by attribute)."""
    val_freq = {}
    for record in data:
        val_freq[record[target_attr]] = val_freq.get(record[target_attr], 0) + 1
    return val_freq


def entropy(data, target_attr):
    """Shannon entropy (bits) of the class-label distribution in *data*."""
    val_freq = _class_counts(data, target_attr)
    total = len(data)
    return -sum((c / total) * math.log2(c / total) for c in val_freq.values())


def info_gain(data, attr, target_attr):
    """Information gain from splitting *data* on *attr*."""
    total = len(data)
    remainder = 0.0
    for value in {record[attr] for record in data}:
        subset = [record for record in data if record[attr] == value]
        remainder += (len(subset) / total) * entropy(subset, target_attr)
    return entropy(data, target_attr) - remainder


def id3(data, attrs, target_attr):
    """Build an ID3 decision tree.

    data        : list of dicts mapping attribute name -> value
                  (the class label lives under *target_attr*)
    attrs       : candidate attribute names still available to split on
    target_attr : key of the class label

    Returns a class label (leaf) or a nested dict {attr: {value: subtree}}.
    """
    base_entropy = entropy(data, target_attr)
    if base_entropy == 0:
        # Pure node: every record shares one label.
        return data[0][target_attr]
    elif len(attrs) == 0:
        # No attributes left: fall back to the majority class.
        val_freq = _class_counts(data, target_attr)
        return max(val_freq, key=val_freq.get)
    else:
        attr_gains = [info_gain(data, attr, target_attr) for attr in attrs]
        selected_attr = attrs[attr_gains.index(max(attr_gains))]
        node = {selected_attr: {}}
        for value in {record[selected_attr] for record in data}:
            new_data = [r for r in data if r[selected_attr] == value]
            new_attrs = attrs.copy()
            new_attrs.remove(selected_attr)
            child_node = id3(new_data, new_attrs, target_attr)
            node[selected_attr][value] = child_node
        return node


def classify(node, sample, attributes):
    """Classify *sample* (a list of values ordered per *attributes*) with
    a tree built by id3().

    Returns the predicted label, or None when the tree has no branch for
    one of the sample's attribute values.
    """
    while isinstance(node, dict):
        attr = next(iter(node))
        values = node[attr]
        value = sample[attributes.index(attr)]
        if value in values:
            child = values[value]
            node = child  # descend one level
        else:
            return None  # unseen attribute value
    return node
# Weather observations: [Outlook, Temperature, Humidity, Windy] — 14 rows
# of play-tennis style data. NOTE(review): the class-label column is not
# present in this fragment.
data = [
    ['Sunny',    'Hot',  'High',   'False'],
    ['Sunny',    'Hot',  'High',   'True'],
    ['Overcast', 'Hot',  'High',   'False'],
    ['Rain',     'Mild', 'High',   'False'],
    ['Rain',     'Cool', 'Normal', 'False'],
    ['Rain',     'Cool', 'Normal', 'True'],
    ['Overcast', 'Cool', 'Normal', 'True'],
    ['Sunny',    'Mild', 'High',   'False'],
    ['Sunny',    'Cool', 'Normal', 'False'],
    ['Rain',     'Mild', 'Normal', 'False'],
    ['Sunny',    'Mild', 'Normal', 'True'],
    ['Overcast', 'Mild', 'High',   'True'],
    ['Overcast', 'Hot',  'Normal', 'False'],
    ['Rain',     'Mild', 'High',   'True'],
]
import numpy as np

# Logistic activation and its derivative. The derivative takes the sigmoid
# OUTPUT as its argument, hence the x * (1 - x) form.
sigmoid = lambda x: 1.0 / (1.0 + np.exp(-x))
sigmoid_derivative = lambda x: x * (1 - x)


def train_network(X, y, hidden=8, epochs=1000, learning_rate=0.1, seed=0):
    """Train a one-hidden-layer sigmoid network with batch gradient descent.

    X : (n_samples, n_features) float array of inputs
    y : (n_samples, n_outputs) float array of one-hot / binary targets
    Returns the learned parameters (W1, b1, W2, b2).
    """
    rng = np.random.default_rng(seed)
    W1 = rng.normal(scale=0.5, size=(X.shape[1], hidden))
    b1 = np.zeros(hidden)
    W2 = rng.normal(scale=0.5, size=(hidden, y.shape[1]))
    b2 = np.zeros(y.shape[1])
    for _ in range(epochs):
        # Forward pass.
        layer1 = sigmoid(X @ W1 + b1)
        layer2 = sigmoid(layer1 @ W2 + b2)
        # Backward pass: squared-error gradient through the sigmoids.
        error = y - layer2
        delta2 = error * sigmoid_derivative(layer2)
        W2_grad = layer1.T @ delta2
        b2_grad = delta2.sum(axis=0)
        delta1 = (delta2 @ W2.T) * sigmoid_derivative(layer1)
        W1_grad = X.T @ delta1
        b1_grad = delta1.sum(axis=0)
        # Descent step (error = y - pred, so "+" moves downhill on the loss).
        W2 += learning_rate * W2_grad
        b2 += learning_rate * b2_grad
        W1 += learning_rate * W1_grad
        b1 += learning_rate * b1_grad
    return W1, b1, W2, b2


def forward(X, params):
    """Forward pass: return the network's output activations for *X*."""
    W1, b1, W2, b2 = params
    return sigmoid(sigmoid(X @ W1 + b1) @ W2 + b2)


if __name__ == '__main__':
    # NOTE(review): the original script loaded iris via sklearn
    # (datasets.load_iris, OneHotEncoder, StandardScaler) and split it into
    # X_train / X_test; those imports and lines were lost in extraction —
    # restore them to reproduce the original demo. It also fit a separate
    # StandardScaler on the test set, which leaks/mismatches scaling: the
    # scaler must be fit on the training data only and reused for the test
    # data.
    pass
import csv
import math


def _is_number(value):
    """True when *value* parses as a float (CSV fields arrive as strings)."""
    try:
        float(value)
        return True
    except ValueError:
        return False


def load_data(filename):
    """Read *filename* as CSV, converting numeric fields to float."""
    with open(filename, newline='') as f:
        return [
            [float(v) if _is_number(v) else v for v in row]
            for row in csv.reader(f)
        ]


def split_data(data):
    """Split rows of (features..., label) into (features, labels)."""
    features = [row[:-1] for row in data]
    labels = [row[-1] for row in data]
    return features, labels


def get_stats(feature):
    """Return (mean, sample standard deviation) for a numeric column."""
    mean = sum(feature) / len(feature)
    variance = sum((x - mean) ** 2 for x in feature) / max(len(feature) - 1, 1)
    return mean, math.sqrt(variance)


def summarize_by_class(features, labels):
    """Per-class (mean, stdev) statistics for every feature column."""
    feature_stats = {}
    for label in set(labels):
        rows = [f for f, l in zip(features, labels) if l == label]
        feature_stats[label] = [get_stats(list(col)) for col in zip(*rows)]
    return feature_stats


def _gaussian(x, mean, stdev):
    """Gaussian probability density of *x* under N(mean, stdev**2)."""
    if stdev == 0:
        # Degenerate column: all training values were identical.
        return 1.0 if x == mean else 1e-9
    exponent = math.exp(-((x - mean) ** 2) / (2 * stdev ** 2))
    return exponent / (math.sqrt(2 * math.pi) * stdev)


def predict(feature_stats, sample):
    """Return the class whose Gaussian likelihood of *sample* is highest."""
    best_label, best_prob = None, -1.0
    for label, stats in feature_stats.items():
        prob = 1.0
        for x, (mean, stdev) in zip(sample, stats):
            prob *= _gaussian(x, mean, stdev)
        if prob > best_prob:
            best_label, best_prob = label, prob
    return best_label


if __name__ == '__main__':
    # NOTE(review): filenames taken from the visible fragment lines.
    training_data = load_data('training_data.csv')
    test_data = load_data('test_data.csv')
    features, labels = split_data(training_data)
    model = summarize_by_class(features, labels)
    correct = sum(predict(model, row[:-1]) == row[-1] for row in test_data)
    print(f'Accuracy: {correct / len(test_data):.2%}')
# NOTE(review): incomplete extraction of a Bayesian-network program for
# heart-disease data. `BayesianModel` and the estimator presumably come
# from pgmpy (pgmpy.models / pgmpy.estimators) — the import lines and the
# loading of `heart_data` are missing from this fragment; confirm against
# the original source.
model = BayesianModel([('age', 'num'), ('sex', 'num'), ('cp', 'num'), ('trestbps', 'num'), ('chol',
'num'), ('fbs', 'num'), ('restecg', 'num'), ('thalach', 'num'), ('exang', 'num'), ('oldpeak', 'num'),
('slope', 'num'), ('ca', 'num'), ('thal', 'num')])
# NOTE(review): `estimator` is never defined in the visible lines — TODO
# confirm it was constructed from the model before this call.
estimator.estimate()
# Perform inference
# NOTE(review): in pgmpy, `model.fit(...)` trains in place and returns
# None; an inference object is normally built separately (e.g.
# VariableElimination(model)). Verify against the original program.
inference = model.fit(heart_data)
# Example patient record used as evidence for the inference query.
patient_data = {'age': 50, 'sex': 1, 'cp': 3, 'trestbps': 130, 'chol': 250, 'fbs': 0, 'restecg': 0,
'thalach': 180, 'exang': 0, 'oldpeak': 0.8, 'slope': 2, 'ca': 0, 'thal': 3}
import pandas as pd
# NOTE(review): incomplete extraction of an EM-vs-k-means comparison.
# `X`, `em` and `kmeans` are never defined in the visible lines — they
# presumably came from sklearn (GaussianMixture / KMeans) and a feature
# selection on `data`; confirm against the original source.
data = pd.read_csv('dataset.csv')
# EM Clustering
print("EM Clustering:")
em_labels = em.fit_predict(X)
# k-Means Clustering
print("\nk-Means Clustering:")
kmeans_labels = kmeans.fit_predict(X)
print("\nComparison:")
# NOTE(review): dangling `else:` — its matching `if` branch (presumably
# comparing the two label assignments) was lost in extraction.
else:
# NOTE(review): incomplete extraction of a k-NN classification demo on
# iris. Missing pieces: the sklearn imports (load_iris,
# KNeighborsClassifier, train_test_split, accuracy_score), the train/test
# split that defines X_train/X_test/y_train/y_test, and the computation
# of `accuracy`, `correct_indices` and `wrong_indices`.
iris = load_iris()
X, y = iris.data, iris.target
# 3-nearest-neighbours classifier fitted on the training split.
knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
print(f"Accuracy: {accuracy:.2f}")
print("\nCorrect Predictions:")
for i in correct_indices:
# NOTE(review): this f-string was split across two physical lines by the
# extraction and is not valid Python as written — restore it to a single
# line (or use implicit string concatenation) in the original.
print(f"Instance {i+1}: Predicted: {iris.target_names[y_pred[i]]} (Correct:
{iris.target_names[y_test[i]]})")
print("\nWrong Predictions:")
for i in wrong_indices:
import numpy as np


def local_regression(x0, X, y, tau):
    """Locally weighted linear regression prediction at query point *x0*.

    X, y : 1-D arrays of training inputs and targets
    tau  : Gaussian kernel bandwidth controlling locality
    Returns the predicted y value at x0.

    NOTE(review): the original fragment returned `theta[1]` (the local
    slope); returning the local prediction is the conventional LWR output.
    """
    # Gaussian proximity weight for every training point.
    weights = np.exp(-((X - x0) ** 2) / (2.0 * tau ** 2))
    W = np.diag(weights)
    # Design matrix with a bias column; solve the weighted least squares.
    Xmat = np.column_stack([np.ones_like(X), X])
    theta = np.linalg.pinv(Xmat.T @ W @ Xmat) @ (Xmat.T @ W @ y)
    return theta[0] + theta[1] * x0


if __name__ == '__main__':
    import matplotlib.pyplot as plt

    # Sample dataset: noisy sine curve (NOTE(review): the generating
    # function was lost in extraction — confirm against the original).
    rng = np.random.default_rng(0)
    X = np.linspace(-3, 3, 50)
    y = np.sin(X) + rng.normal(0.0, 0.2, X.shape)

    # LWR predictions
    tau = 1.0
    y_pred = np.array([local_regression(x0, X, y, tau) for x0 in X])

    plt.figure(figsize=(10, 6))
    plt.scatter(X, y, label='data')
    plt.plot(X, y_pred, color='red', label=f'LWR (tau={tau})')
    plt.xlabel('X')
    plt.ylabel('y')
    plt.legend()
    plt.show()