SVM K NN MLP With Sklearn Jupyter NoteBo
SVM K NN MLP With Sklearn Jupyter NoteBo
0/ Maintenance
General packages
In [ ]:
# basic maths
import numpy as np
# ploting
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
# other tools
from sklearn.multiclass import OneVsRestClassifier
# re-scaling values
from sklearn.preprocessing import StandardScaler
Plot parameters
In [ ]:
fig_width = 10
fig_height = fig_width/1.618
fig_resolution = fig_width*fig_height
Loading data
In [ ]:
# return_X_y = True :
# X = the data
# y = the targets
# as_frame :
# the values are represented as Pandas DataFrames
X,y = load_iris(return_X_y=True,as_frame=True)
colors = np.array(y)
class_names = ["setosa","versicolor","virginica"]
1/ Data Importation
In [ ]:
Out[ ]:
sepal length sepal width petal length petal width
(cm) (cm) (cm) (cm)
In [ ]:
2/ Data analysis
if draw_plot ==1:
plt.subplot(3,2,1)
plt.scatter(PL,SL,c=colors)
plt.xlabel("petal length [cm]")
plt.ylabel("sepal length [cm]")
plt.subplot(3,2,2)
plt.scatter(PW,SL,c=colors)
plt.xlabel("petal width [cm]")
plt.ylabel("sepal length [cm]")
plt.subplot(3,2,3)
plt.scatter(PL,SW,c=colors)
plt.xlabel("petal length [cm]")
plt.ylabel("sepal width [cm]")
plt.subplot(3,2,4)
plt.scatter(PW,SW,c=colors)
plt.xlabel("petal width [cm]")
plt.ylabel("sepal width [cm]")
plt.subplot(3,2,5)
plt.scatter(PW,PL,c=colors)
plt.xlabel("petal width [cm]")
plt.ylabel("petal length [cm]")
plt.subplot(3,2,6)
plt.scatter(SW,SL,c=colors)
plt.xlabel("sepal width [cm]")
plt.ylabel("sepal length [cm]")
plt.show()
if draw_plot == 1:
fig, axes = plt.subplots(nrows=2, ncols=2)
fig.set_figheight(fig_height*2)
fig.set_figwidth(fig_width*2)
X.plot(kind='line',ax=axes[0,0])
X.plot(kind='box',ax=axes[0,1])
X.plot(kind='density',ax=axes[1,0])
X.plot(kind='hist',ax=axes[1,1])
plt.show()
3/ Data pre-processing
scaler = StandardScaler()
X = scaler.fit_transform(X)
X = pd.DataFrame(X)
if draw_plot == 1:
fig, axes = plt.subplots(nrows=2, ncols=2)
fig.set_figheight(13)
fig.set_figwidth(21)
X.plot(kind='line',ax=axes[0,0])
X.plot(kind='box',ax=axes[0,1])
X.plot(kind='density',ax=axes[1,0])
X.plot(kind='hist',ax=axes[1,1])
plt.show()
Splitting the datasets into train and test sub-datasets
In [ ]:
4/ Training
Classifier definition
In [ ]:
classifier_SVM = svm.SVC(kernel="linear")
Training
In [ ]:
classifier_SVM.fit(X_train,y_train)
Out[ ]:
SVC(kernel='linear')
Classifier definition
In [ ]:
classifier_KNN = neighbors.KNeighborsClassifier(n_neighbors=8)
Training
In [ ]:
classifier_KNN.fit(X_train,y_train)
Out[ ]:
KNeighborsClassifier(n_neighbors=8)
Classifier definition
In [ ]:
classifier_MLP = MLPClassifier(
solver = "lbfgs",
activation = "relu",
alpha = 1e-5,
hidden_layer_sizes = (5,2),
)
Training
In [ ]:
classifier_MLP.fit(X_train,y_train)
Out[ ]:
MLPClassifier(alpha=1e-05, hidden_layer_sizes=(5, 2), solver='lbfgs')
5/ Evaluation
Predictions
predictions_SVM = classifier_SVM.predict(X_test)
predictions_KNN = classifier_KNN.predict(X_test)
predictions_MLP = classifier_MLP.predict(X_test)
Confusion matrix
Acc_SVM = np.round(accuracy(y_test,predictions_SVM),3)
Acc_KNN = np.round(accuracy(y_test,predictions_KNN),3)
Acc_MLP = np.round(accuracy(y_test,predictions_MLP),3)
Pre_SVM = np.round(precision(y_test,predictions_SVM,average="macro"),3)
Pre_KNN = np.round(precision(y_test,predictions_KNN,average="macro"),3)
Pre_MLP = np.round(precision(y_test,predictions_MLP,average="macro"),3)
Rcl_SVM = np.round(recall(y_test,predictions_SVM,average="macro"),3)
Rcl_KNN = np.round(recall(y_test,predictions_KNN,average="macro"),3)
Rcl_MLP = np.round(recall(y_test,predictions_MLP,average="macro"),3)
F1_SVM = np.round(f1(y_test,predictions_SVM,average="macro"),3)
F1_KNN = np.round(f1(y_test,predictions_KNN,average="macro"),3)
F1_MLP = np.round(f1(y_test,predictions_MLP,average="macro"),3)
Acc = ["Accuracy",Acc_SVM,Acc_KNN,Acc_MLP]
Rcl = ["Recall",Rcl_SVM,Rcl_KNN,Rcl_MLP]
Pre = ["Precision",Pre_SVM,Pre_KNN,Pre_MLP]
F1 = ["F1",F1_SVM,F1_KNN,F1_MLP]
Displaying results
using DataFrames
In [ ]:
Metrics = pd.DataFrame(np.zeros((3,4)))
Metrics.columns = ["metric","SVM","KNN","MLP"]
Metrics.loc[0] = Acc
Metrics.loc[1] = Rcl
Metrics.loc[2] = Pre
Metrics.loc[3] = F1
Metrics
Out[ ]:
metric SVM KNN MLP
6/ Optimisation
In [ ]:
a/ SVM
In [ ]:
gs_SVM = GridSearchCV(
svm.SVC(),
param_grid = {"kernel":["linear","poly","rbf","sigmoid"]},
scoring = scoring,
refit = False,
return_train_score = True,
cv = 5,
)
gs_SVM.fit(X_train,y_train)
results_SVM = gs_SVM.cv_results_
kernels = ["linear","poly","rbf","sigmoid"]
mean_fit_times = np.round(results_SVM['mean_fit_time']*1e3,2)
std_fit_times = np.round(results_SVM['std_fit_time']*1e3,2)
mean_score_times = np.round(results_SVM['mean_score_time']*1e3,2)
std_score_times = np.round(results_SVM['std_score_time']*1e3,2)
mean_test_accuracy = np.round(results_SVM['mean_test_Accuracy'],2)
std_test_accuracy = np.round(results_SVM['std_test_Accuracy'],2)
mean_test_precision = np.round(results_SVM['mean_test_Precision'],2)
std_test_precision = np.round(results_SVM['std_test_Precision'],2)
mean_test_recall = np.round(results_SVM['mean_test_Recall'],2)
std_test_recall = np.round(results_SVM['std_test_Recall'],2)
mean_test_f1 = np.round(results_SVM['mean_test_F1'],2)
std_test_f1 = np.round(results_SVM['std_test_F1'],2)
metrics_SVM = np.zeros((4,13))
metrics_SVM[:,1] = mean_fit_times
metrics_SVM[:,2] = std_fit_times
metrics_SVM[:,3] = mean_score_times
metrics_SVM[:,4] = std_score_times
metrics_SVM[:,5] = mean_test_accuracy
metrics_SVM[:,6] = std_test_accuracy
metrics_SVM[:,7] = mean_test_precision
metrics_SVM[:,8] = std_test_precision
metrics_SVM[:,9] = mean_test_accuracy
metrics_SVM[:,10] = std_test_accuracy
metrics_SVM[:,11] = mean_test_precision
metrics_SVM[:,12] = std_test_precision
metrics_SVM = pd.DataFrame(metrics_SVM)
metrics_SVM.columns = [
"kernel",
"mean fit times [ms]",
"std fit times [ms]",
"mean score times [ms]",
"std score times [ms]",
"mean accuracy",
"std accuracy",
"mean precision",
"std precision",
"mean recall",
"std recall",
"mean F1",
"std F1",
]
metrics_SVM["kernel"]=kernels
metrics_SVM
Out[ ]:
mean fit times std fit times mean score std score times mean std mean std mean std mean std
kernel
[ms] [ms] times [ms] [ms] accuracy accuracy precision precision recall recall F1 F1
0 linear 1.90 0.65 3.17 0.38 0.95 0.03 0.95 0.03 0.95 0.03 0.95 0.03
1 poly 1.80 0.24 3.20 0.27 0.88 0.04 0.91 0.03 0.88 0.04 0.91 0.03
2 rbf 1.80 0.15 3.21 0.16 0.93 0.05 0.93 0.05 0.93 0.05 0.93 0.05
3 sigmoid 1.53 0.16 2.78 0.25 0.93 0.07 0.94 0.06 0.93 0.07 0.94 0.06
In [ ]:
xx = [0,1,2,3]
ylim = [0.7,1]
plt.subplot(2,2,1)
plt.scatter(metrics_SVM["kernel"], metrics_SVM["mean precision"])
plt.ylabel('mean PRECISION')
plt.gca().fill_between(
xx,
metrics_SVM["mean precision"]-metrics_SVM["std precision"],
metrics_SVM["mean precision"]+metrics_SVM["std precision"],
alpha = 0.1,
label = "uncertainty")
plt.xticks(xx,kernels)
plt.ylim(ylim)
plt.legend(loc="lower right")
plt.subplot(2,2,2)
plt.scatter(metrics_SVM["kernel"], metrics_SVM["mean recall"])
plt.ylabel('mean RECALL')
plt.gca().fill_between(
xx,
metrics_SVM["mean recall"]-metrics_SVM["std recall"],
metrics_SVM["mean recall"]+metrics_SVM["std recall"],
alpha = 0.1,
label = "uncertainty")
plt.xticks(xx,kernels)
plt.ylim(ylim)
plt.legend(loc="lower right")
plt.subplot(2,2,3)
plt.scatter(metrics_SVM["kernel"], metrics_SVM["mean accuracy"])
plt.xlabel('kernel type')
plt.ylabel('mean ACCURACY')
plt.gca().fill_between(
xx,
metrics_SVM["mean accuracy"]-metrics_SVM["std accuracy"],
metrics_SVM["mean accuracy"]+metrics_SVM["std accuracy"],
alpha = 0.1,
label = "uncertainty")
plt.xticks(xx,kernels)
plt.ylim(ylim)
plt.legend(loc="lower right")
plt.subplot(2,2,4)
plt.scatter(metrics_SVM["kernel"], metrics_SVM["mean F1"])
plt.xlabel('kernel type')
plt.ylabel('mean F1')
plt.gca().fill_between(
xx,
metrics_SVM["mean F1"]-metrics_SVM["std F1"],
metrics_SVM["mean F1"]+metrics_SVM["std F1"],
alpha = 0.1,
label = "uncertainty")
plt.xticks(xx,kernels)
plt.ylim(ylim)
plt.legend(loc="lower right")
plt.show()
In [ ]:
plt.gca().fill_between(
xx,
metrics_SVM["mean score times [ms]"]-metrics_SVM["std score times [ms]"],
metrics_SVM["mean score times [ms]"]+metrics_SVM["std score times [ms]"],
color = "green",
alpha = 0.1,
label = "uncertainty score time [ms]")
plt.legend(ncol=2)
plt.xticks(xx,kernels)
plt.show()
b/ K-NN
In [ ]:
gs_KNN = GridSearchCV(
neighbors.KNeighborsClassifier(),
param_grid = {"n_neighbors":[2,3,4,5,6,7,8,9,10,11,12,13,14,15]},
scoring = scoring,
refit = False,
return_train_score = True,
cv = 5,
)
gs_KNN.fit(X_train,y_train)
results_KNN = gs_KNN.cv_results_
metrics_KNN = np.zeros((14,13))
metrics_KNN[:,0] = neighbours
metrics_KNN[:,1] = mean_fit_times
metrics_KNN[:,2] = std_fit_times
metrics_KNN[:,3] = mean_score_times
metrics_KNN[:,4] = std_score_times
metrics_KNN[:,5] = mean_test_accuracy
metrics_KNN[:,6] = std_test_accuracy
metrics_KNN[:,7] = mean_test_precision
metrics_KNN[:,8] = std_test_precision
metrics_KNN[:,9] = mean_test_accuracy
metrics_KNN[:,10] = std_test_accuracy
metrics_KNN[:,11] = mean_test_precision
metrics_KNN[:,12] = std_test_precision
metrics_KNN = pd.DataFrame(metrics_KNN)
metrics_KNN.columns = [
"neighbours",
"mean fit times [ms]",
"std fit times [ms]",
"mean score times [ms]",
"std score times [ms]",
"mean accuracy",
"std accuracy",
"mean precision",
"std precision",
"mean recall",
"std recall",
"mean F1",
"std F1",
]
metrics_KNN
Out[ ]:
mean fit std fit times mean score std score mean std mean std mean std mean std
neighbours
times [ms] [ms] times [ms] times [ms] accuracy accuracy precision precision recall recall F1 F1
0 2.0 1.56 0.15 3.94 0.44 0.90 0.06 0.91 0.06 0.90 0.06 0.91 0.06
1 3.0 1.54 0.12 3.96 0.28 0.90 0.03 0.91 0.03 0.90 0.03 0.91 0.03
2 4.0 1.16 0.03 3.03 0.03 0.88 0.05 0.89 0.05 0.88 0.05 0.89 0.05
3 5.0 1.19 0.11 3.04 0.05 0.93 0.04 0.94 0.04 0.93 0.04 0.94 0.04
4 6.0 1.14 0.01 3.16 0.27 0.93 0.02 0.94 0.03 0.93 0.02 0.94 0.03
5 7.0 1.14 0.00 3.09 0.14 0.93 0.05 0.93 0.05 0.93 0.05 0.93 0.05
6 8.0 1.13 0.00 3.02 0.01 0.92 0.04 0.92 0.04 0.92 0.04 0.92 0.04
7 9.0 1.14 0.01 3.03 0.01 0.93 0.05 0.93 0.05 0.93 0.05 0.93 0.05
8 10.0 1.21 0.14 3.25 0.42 0.93 0.05 0.93 0.05 0.93 0.05 0.93 0.05
9 11.0 1.14 0.00 3.03 0.01 0.93 0.05 0.93 0.05 0.93 0.05 0.93 0.05
10 12.0 1.14 0.00 3.05 0.01 0.93 0.05 0.93 0.05 0.93 0.05 0.93 0.05
11 13.0 1.20 0.13 3.06 0.03 0.95 0.03 0.95 0.03 0.95 0.03 0.95 0.03
12 14.0 1.14 0.00 3.05 0.02 0.93 0.05 0.93 0.05 0.93 0.05 0.93 0.05
13 15.0 1.13 0.01 3.03 0.01 0.91 0.05 0.92 0.05 0.91 0.05 0.92 0.05
In [ ]:
ylim = [0.7,1]
plt.subplot(2,2,1)
plt.scatter(metrics_KNN["neighbours"], metrics_KNN["mean precision"])
plt.ylabel('mean PRECISION')
plt.gca().fill_between(
np.linspace(2,15,14),
metrics_KNN["mean precision"]-metrics_KNN["std precision"],
metrics_KNN["mean precision"]+metrics_KNN["std precision"],
alpha = 0.1,
label = "uncertainty")
plt.ylim(ylim)
plt.legend()
plt.subplot(2,2,2)
plt.scatter(metrics_KNN["neighbours"], metrics_KNN["mean recall"])
plt.ylabel('mean RECALL')
plt.gca().fill_between(
np.linspace(2,15,14),
metrics_KNN["mean recall"]-metrics_KNN["std recall"],
metrics_KNN["mean recall"]+metrics_KNN["std recall"],
alpha = 0.1,
label = "uncertainty")
plt.ylim(ylim)
plt.legend()
plt.subplot(2,2,3)
plt.scatter(metrics_KNN["neighbours"], metrics_KNN["mean accuracy"])
plt.xlabel('number of neighbours')
plt.ylabel('mean ACCURACY')
plt.gca().fill_between(
np.linspace(2,15,14),
metrics_KNN["mean accuracy"]-metrics_KNN["std accuracy"],
metrics_KNN["mean accuracy"]+metrics_KNN["std accuracy"],
alpha = 0.1,
label = "uncertainty")
plt.ylim(ylim)
plt.legend()
plt.subplot(2,2,4)
plt.scatter(metrics_KNN["neighbours"], metrics_KNN["mean F1"])
plt.xlabel('number of neighbours')
plt.ylabel('mean F1')
plt.gca().fill_between(
np.linspace(2,15,14),
metrics_KNN["mean F1"]-metrics_KNN["std F1"],
metrics_KNN["mean F1"]+metrics_KNN["std F1"],
alpha = 0.1,
label = "uncertainty")
plt.ylim(ylim)
plt.legend()
plt.show()
In [ ]:
gs_MLP = GridSearchCV(
MLPClassifier(),
param_grid = {"solver":['sgd','lbfgs','adam']},
scoring = scoring,
refit = False,
return_train_score = True,
cv = 5,
)
gs_MLP.fit(X_train,y_train)
results_MLP = gs_MLP.cv_results_
solvers = ['sgd','lbfgs','adam']
mean_fit_times = np.round(results_MLP['mean_fit_time']*1e3,2)
std_fit_times = np.round(results_MLP['std_fit_time']*1e3,2)
mean_score_times = np.round(results_MLP['mean_score_time']*1e3,2)
std_score_times = np.round(results_MLP['std_score_time']*1e3,2)
mean_test_accuracy = np.round(results_MLP['mean_test_Accuracy'],2)
std_test_accuracy = np.round(results_MLP['std_test_Accuracy'],2)
mean_test_precision = np.round(results_MLP['mean_test_Precision'],2)
std_test_precision = np.round(results_MLP['std_test_Precision'],2)
mean_test_recall = np.round(results_MLP['mean_test_Recall'],2)
std_test_recall = np.round(results_MLP['std_test_Recall'],2)
mean_test_f1 = np.round(results_MLP['mean_test_F1'],2)
std_test_f1 = np.round(results_MLP['std_test_F1'],2)
metrics_MLP = np.zeros((3,13))
metrics_MLP[:,0] = [1,2,3]
metrics_MLP[:,1] = mean_fit_times
metrics_MLP[:,2] = std_fit_times
metrics_MLP[:,3] = mean_score_times
metrics_MLP[:,4] = std_score_times
metrics_MLP[:,5] = mean_test_accuracy
metrics_MLP[:,6] = std_test_accuracy
metrics_MLP[:,7] = mean_test_precision
metrics_MLP[:,8] = std_test_precision
metrics_MLP[:,9] = mean_test_accuracy
metrics_MLP[:,10] = std_test_accuracy
metrics_MLP[:,11] = mean_test_precision
metrics_MLP[:,12] = std_test_precision
metrics_MLP = pd.DataFrame(metrics_MLP)
metrics_MLP.columns = [
"solver",
"mean fit times [ms]",
"std fit times [ms]",
"mean score times [ms]",
"std score times [ms]",
"mean accuracy",
"std accuracy",
"mean precision",
"std precision",
"mean recall",
"std recall",
"mean F1",
"std F1",
]
metrics_MLP['solver'] = solvers
metrics_MLP
0 sgd 75.16 4.90 2.68 0.18 0.80 0.04 0.85 0.04 0.80 0.04 0.85 0.04
1 lbfgs 16.92 9.76 2.42 0.03 0.91 0.06 0.92 0.06 0.91 0.06 0.92 0.06
2 adam 78.28 0.39 2.66 0.09 0.91 0.05 0.91 0.05 0.91 0.05 0.91 0.05
In [ ]:
xx = [0,1,2]
plt.subplot(2,2,1)
plt.scatter(metrics_MLP["solver"], metrics_MLP["mean precision"])
plt.ylabel('mean PRECISION')
plt.gca().fill_between(
xx,
metrics_MLP["mean precision"]-metrics_MLP["std precision"],
metrics_MLP["mean precision"]+metrics_MLP["std precision"],
alpha = 0.1,
label = "uncertainty")
plt.xticks(xx,solvers)
plt.ylim(ylim)
plt.legend(loc="lower right")
plt.subplot(2,2,2)
plt.scatter(metrics_MLP["solver"], metrics_MLP["mean recall"])
plt.ylabel('mean RECALL')
plt.gca().fill_between(
xx,
metrics_MLP["mean recall"]-metrics_MLP["std recall"],
metrics_MLP["mean recall"]+metrics_MLP["std recall"],
alpha = 0.1,
label = "uncertainty")
plt.xticks(xx,solvers)
plt.ylim(ylim)
plt.legend(loc="lower right")
plt.subplot(2,2,3)
plt.scatter(metrics_MLP["solver"], metrics_MLP["mean accuracy"])
plt.xlabel('solver method')
plt.ylabel('mean ACCURACY')
plt.gca().fill_between(
xx,
metrics_MLP["mean accuracy"]-metrics_MLP["std accuracy"],
metrics_MLP["mean accuracy"]+metrics_MLP["std accuracy"],
alpha = 0.1,
label = "uncertainty")
plt.xticks(xx,solvers)
plt.ylim(ylim)
plt.legend(loc="lower right")
plt.subplot(2,2,4)
plt.scatter(metrics_MLP["solver"], metrics_MLP["mean F1"])
plt.xlabel('solver method')
plt.ylabel('mean F1')
plt.gca().fill_between(
xx,
metrics_MLP["mean F1"]-metrics_MLP["std F1"],
metrics_MLP["mean F1"]+metrics_MLP["std F1"],
alpha = 0.1,
label = "uncertainty")
plt.xticks(xx,solvers)
plt.ylim(ylim)
plt.legend(loc="lower right")
plt.show()
In [ ]:
plt.gca().fill_between(
xx,
metrics_MLP["mean score times [ms]"]-metrics_MLP["std score times [ms]"],
metrics_MLP["mean score times [ms]"]+metrics_MLP["std score times [ms]"],
color = "green",
alpha = 0.1,
label = "uncertainty score time [ms]")
plt.legend(ncol=2)
plt.xticks(xx,solvers)
plt.show()
c/ MLP - activation
In [ ]:
gs_MLP = GridSearchCV(
MLPClassifier(),
param_grid = {"activation":['identity','logistic','relu','tanh','softmax']},
scoring = scoring,
refit = False,
return_train_score = True,
cv = 5,
)
gs_MLP.fit(X_train,y_train)
results_MLP = gs_MLP.cv_results_
method = ['identity','logistic','relu','tanh','softmax']
mean_fit_times = np.round(results_MLP['mean_fit_time']*1e3,2)
std_fit_times = np.round(results_MLP['std_fit_time']*1e3,2)
mean_score_times = np.round(results_MLP['mean_score_time']*1e3,2)
std_score_times = np.round(results_MLP['std_score_time']*1e3,2)
mean_test_accuracy = np.round(results_MLP['mean_test_Accuracy'],2)
std_test_accuracy = np.round(results_MLP['std_test_Accuracy'],2)
mean_test_precision = np.round(results_MLP['mean_test_Precision'],2)
std_test_precision = np.round(results_MLP['std_test_Precision'],2)
mean_test_recall = np.round(results_MLP['mean_test_Recall'],2)
std_test_recall = np.round(results_MLP['std_test_Recall'],2)
mean_test_f1 = np.round(results_MLP['mean_test_F1'],2)
std_test_f1 = np.round(results_MLP['std_test_F1'],2)
metrics_MLP = np.zeros((5,13))
metrics_MLP[:,1] = mean_fit_times
metrics_MLP[:,2] = std_fit_times
metrics_MLP[:,3] = mean_score_times
metrics_MLP[:,4] = std_score_times
metrics_MLP[:,5] = mean_test_accuracy
metrics_MLP[:,6] = std_test_accuracy
metrics_MLP[:,7] = mean_test_precision
metrics_MLP[:,8] = std_test_precision
metrics_MLP[:,9] = mean_test_accuracy
metrics_MLP[:,10] = std_test_accuracy
metrics_MLP[:,11] = mean_test_precision
metrics_MLP[:,12] = std_test_precision
metrics_MLP = pd.DataFrame(metrics_MLP)
metrics_MLP.columns = [
"method",
"mean fit times [ms]",
"std fit times [ms]",
"mean score times [ms]",
"std score times [ms]",
"mean accuracy",
"std accuracy",
"mean precision",
"std precision",
"mean recall",
"std recall",
"mean F1",
"std F1",
]
metrics_MLP['method'] = method
metrics_MLP
Out[ ]:
mean fit times std fit times mean score std score times mean std mean std mean std mean std
method
[ms] [ms] times [ms] [ms] accuracy accuracy precision precision recall recall F1 F1
0 identity 65.29 3.73 2.75 0.18 0.93 0.04 0.93 0.04 0.93 0.04 0.93 0.04
1 logistic 75.33 0.99 2.62 0.17 0.87 0.07 0.89 0.05 0.87 0.07 0.89 0.05
2 relu 78.34 0.80 2.56 0.11 0.92 0.05 0.92 0.05 0.92 0.05 0.92 0.05
3 tanh 93.28 0.99 2.58 0.12 0.93 0.05 0.93 0.05 0.93 0.05 0.93 0.05
4 softmax 1.71 0.03 0.00 0.00 NaN NaN NaN NaN NaN NaN NaN NaN
In [ ]:
xx = [0,1,2,3,4]
ylim = [0.7,1]
plt.subplot(2,2,1)
plt.scatter(metrics_MLP["method"], metrics_MLP["mean precision"])
plt.ylabel('mean PRECISION')
plt.gca().fill_between(
xx,
metrics_MLP["mean precision"]-metrics_MLP["std precision"],
metrics_MLP["mean precision"]+metrics_MLP["std precision"],
alpha = 0.1,
label = "uncertainty")
plt.xticks(xx,method)
plt.ylim(ylim)
plt.legend(loc="lower right")
plt.subplot(2,2,2)
plt.scatter(metrics_MLP["method"], metrics_MLP["mean recall"])
plt.ylabel('mean RECALL')
plt.gca().fill_between(
xx,
metrics_MLP["mean recall"]-metrics_MLP["std recall"],
metrics_MLP["mean recall"]+metrics_MLP["std recall"],
alpha = 0.1,
label = "uncertainty")
plt.xticks(xx,method)
plt.ylim(ylim)
plt.legend(loc="lower right")
plt.subplot(2,2,3)
plt.scatter(metrics_MLP["method"], metrics_MLP["mean accuracy"])
plt.xlabel('activation method')
plt.ylabel('mean ACCURACY')
plt.gca().fill_between(
xx,
metrics_MLP["mean accuracy"]-metrics_MLP["std accuracy"],
metrics_MLP["mean accuracy"]+metrics_MLP["std accuracy"],
alpha = 0.1,
label = "uncertainty")
plt.xticks(xx,method)
plt.ylim(ylim)
plt.legend(loc="lower right")
plt.subplot(2,2,4)
plt.scatter(metrics_MLP["method"], metrics_MLP["mean F1"])
plt.xlabel('activation method')
plt.ylabel('mean F1')
plt.gca().fill_between(
xx,
metrics_MLP["mean F1"]-metrics_MLP["std F1"],
metrics_MLP["mean F1"]+metrics_MLP["std F1"],
alpha = 0.1,
label = "uncertainty")
plt.xticks(xx,method)
plt.ylim(ylim)
plt.legend(loc="lower right")
plt.show()
In [ ]:
plt.gca().fill_between(
xx,
metrics_MLP["mean score times [ms]"]-metrics_MLP["std score times [ms]"],
metrics_MLP["mean score times [ms]"]+metrics_MLP["std score times [ms]"],
color = "green",
alpha = 0.1,
label = "uncertainty score time [ms]")
plt.legend(loc="center left")
plt.xticks(xx,method)
plt.show()
In [ ]: