DSM 3
def read_dataset(filename):
    """Load a CSV file and return its contents as a NumPy array.

    Parameters
    ----------
    filename : str
        Path to the CSV file to read.

    Returns
    -------
    numpy.ndarray
        The full table converted to a 2-D array (mixed dtypes become
        ``object`` when non-numeric columns are present).
    """
    return pd.read_csv(filename).to_numpy()
# Load the Iris dataset from disk into a raw matrix for the exercises below.
csv_file = "iris.csv"
dataset = read_dataset(csv_file)
1
[6.3 3.3 6.0 2.5 'Virginica']
[5.8 2.7 5.1 1.9 'Virginica']
[7.1 3.0 5.9 2.1 'Virginica']
[6.3 2.9 5.6 1.8 'Virginica']
[6.5 3.0 5.8 2.2 'Virginica']
[7.6 3.0 6.6 2.1 'Virginica']
[4.9 2.5 4.5 1.7 'Virginica']
[7.3 2.9 6.3 1.8 'Virginica']
[6.7 2.5 5.8 1.8 'Virginica']
[7.2 3.6 6.1 2.5 'Virginica']]
2.a Calculate the data mean for each attribute and represent it as a vector
def calculate_data_mean(filename):
    """Compute the per-attribute mean of a CSV dataset.

    Parameters
    ----------
    filename : str
        Path to the CSV file.

    Returns
    -------
    pandas.Series
        Mean of every numeric column, indexed by column name; non-numeric
        columns (e.g. the class label) are excluded via ``numeric_only=True``.
    """
    data = pd.read_csv(filename)
    return data.mean(numeric_only=True)
# Report the mean of each numeric attribute in the Iris dataset.
csv_file = "iris.csv"
mean_vector = calculate_data_mean(csv_file)
print("Mean Vector:")
print(mean_vector)
Mean Vector:
sepal.length 5.95
sepal.width 3.07
petal.length 3.86
petal.width 1.22
dtype: float64
2
2.d Calculate the Chebyshev distance between two data objects
def Chebychev_distance(vec1, vec2):
    """Return the Chebyshev (L-infinity) distance between two vectors.

    Parameters
    ----------
    vec1, vec2 : array_like
        Numeric sequences of equal length.

    Returns
    -------
    numpy scalar
        The largest absolute coordinate-wise difference.
    """
    a = np.asarray(vec1)
    b = np.asarray(vec2)
    return np.abs(a - b).max()
mahalanobis_distance = np.sqrt(mahalanobis_sq)
return mahalanobis_distance
# Load the Iris measurements and keep only the four numeric feature columns
# (the Mahalanobis distance is computed over these; its covariance matrix
# must be estimated from numeric data only).
iris_data = pd.read_csv('iris.csv')
columns = ['sepal.length', 'sepal.width', 'petal.length', 'petal.width']
iris_subset = iris_data[columns]
# ref https://www.machinelearningplus.com/statistics/mahalanobis-distance/
3
distances.append((dist, labels[i]))
distances.sort()
k_nearest = distances[:k]
k_nearest_labels = [label for (_, label) in k_nearest]
most_common = Counter(k_nearest_labels).most_common(1)
predicted_label = most_common[0][0]
return predicted_label
# Prepare the Iris features and class labels, plus one query point, for the
# k-nearest-neighbours classification exercise above.
iris_data = pd.read_csv('iris.csv')
feature_columns = ['sepal.length', 'sepal.width', 'petal.length', 'petal.width']
iris_features = iris_data[feature_columns]
iris_labels = iris_data['variety']
random_point = np.array([6.1, 2.9, 4.7, 1.3])
k = 5 # Number of nearest neighbors to consider
4
elif distance_metric == 'manhattan':
for centroid in centroids:
distances.append(manhattan_distance(point, centroid))
elif distance_metric == 'chebyshev':
for centroid in centroids:
distances.append(chebyshev_distance(point, centroid))
elif distance_metric == 'euclidean':
for centroid in centroids:
distances.append(euclidean_distance(point, centroid))
cluster_labels[i] = np.argmin(distances)
return cluster_labels
5
return dist
# Compare k-means clusterings of the Iris data (k = 3 clusters, matching the
# three species) under four different distance metrics.
# NOTE(review): the PDF extraction has flattened the loop's indentation — the
# six statements after the `for` line form its body in the original notebook.
k = 3
distance_metrics = ['mahalanobis', 'manhattan', 'chebyshev', 'euclidean']
for metric in distance_metrics:
cluster_labels, centroids = kmeans(iris_data, k, distance_metric=metric)
print(f"Distance Metric: {metric}")
print("Cluster Labels:")
print(cluster_labels)
print("Centroids:")
print(centroids)
print()