Commit 1a59331
Pushing the docs to dev/ for branch: main, commit b32e5c7ab3535694f3c9fc98ec0e2e3ed03cf923
1 parent 2a3510a commit 1a59331

File tree

1,337 files changed (+5451 -5351 lines)
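Note: nearly every source change in this commit is the same one-line edit, passing dual="auto" explicitly to sklearn.svm.LinearSVC across the example gallery. This appears to track the deprecation cycle started in scikit-learn 1.3, where constructing a LinearSVC without an explicit dual raises a FutureWarning because the default is scheduled to change from True to "auto". A minimal sketch of the pattern being applied (illustrative data, not from this commit; assumes scikit-learn >= 1.3):

# Before: LinearSVC(random_state=42) -> FutureWarning about `dual`.
# After: dual="auto" lets liblinear pick the primal or dual formulation
# based on n_samples vs. n_features, loss, and penalty.
from sklearn.datasets import make_classification
from sklearn.svm import LinearSVC

X, y = make_classification(n_samples=200, n_features=20, random_state=0)
clf = LinearSVC(dual="auto", random_state=42).fit(X, y)
print(clf.score(X, y))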

dev/.buildinfo

+1 -1

@@ -1,4 +1,4 @@
 # Sphinx build info version 1
 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: 0c8082022f005208422aa7a8498ba762
+config: f16a8334a27fd41e7952661bdfaa6ad3
 tags: 645f666f9bcd5a90fca523b33c5a78b7

dev/_downloads/02d88d76c60b7397c8c6e221b31568dd/plot_grid_search_refit_callable.py

+1 -1

@@ -81,7 +81,7 @@ def best_low_complexity(cv_results):
 pipe = Pipeline(
     [
         ("reduce_dim", PCA(random_state=42)),
-        ("classify", LinearSVC(random_state=42, C=0.01)),
+        ("classify", LinearSVC(random_state=42, C=0.01, dual="auto")),
     ]
 )

dev/_downloads/036b9372e2e7802453cbb994da7a6786/plot_linearsvc_support_vectors.py

+1 -1

@@ -20,7 +20,7 @@
 plt.figure(figsize=(10, 5))
 for i, C in enumerate([1, 100]):
     # "hinge" is the standard SVM loss
-    clf = LinearSVC(C=C, loss="hinge", random_state=42).fit(X, y)
+    clf = LinearSVC(C=C, loss="hinge", random_state=42, dual="auto").fit(X, y)
     # obtain the support vectors through the decision function
     decision_function = clf.decision_function(X)
     # we can also calculate the decision function manually
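A side note on this example: liblinear supports the hinge loss only in its dual formulation, so with loss="hinge" the "auto" setting should resolve to dual=True and leave the fitted model unchanged; the explicit argument only silences the FutureWarning. A quick sanity check of that equivalence (a sketch, assuming scikit-learn >= 1.3):

import numpy as np
from sklearn.datasets import make_blobs
from sklearn.svm import LinearSVC

X, y = make_blobs(n_samples=40, centers=2, random_state=0)
# dual="auto" should pick the dual solver here, since the primal solver
# does not support loss="hinge".
a = LinearSVC(C=1, loss="hinge", random_state=42, dual="auto").fit(X, y)
b = LinearSVC(C=1, loss="hinge", random_state=42, dual=True).fit(X, y)
assert np.allclose(a.coef_, b.coef_) and np.allclose(a.intercept_, b.intercept_)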

dev/_downloads/083d8568c199bebbc1a847fc6c917e9e/plot_kernel_approximation.ipynb

+1 -1

@@ -40,7 +40,7 @@
 },
 "outputs": [],
 "source": [
-"n_samples = len(digits.data)\ndata = digits.data / 16.0\ndata -= data.mean(axis=0)\n\n# We learn the digits on the first half of the digits\ndata_train, targets_train = (data[: n_samples // 2], digits.target[: n_samples // 2])\n\n\n# Now predict the value of the digit on the second half:\ndata_test, targets_test = (data[n_samples // 2 :], digits.target[n_samples // 2 :])\n# data_test = scaler.transform(data_test)\n\n# Create a classifier: a support vector classifier\nkernel_svm = svm.SVC(gamma=0.2)\nlinear_svm = svm.LinearSVC()\n\n# create pipeline from kernel approximation\n# and linear svm\nfeature_map_fourier = RBFSampler(gamma=0.2, random_state=1)\nfeature_map_nystroem = Nystroem(gamma=0.2, random_state=1)\nfourier_approx_svm = pipeline.Pipeline(\n    [(\"feature_map\", feature_map_fourier), (\"svm\", svm.LinearSVC())]\n)\n\nnystroem_approx_svm = pipeline.Pipeline(\n    [(\"feature_map\", feature_map_nystroem), (\"svm\", svm.LinearSVC())]\n)\n\n# fit and predict using linear and kernel svm:\n\nkernel_svm_time = time()\nkernel_svm.fit(data_train, targets_train)\nkernel_svm_score = kernel_svm.score(data_test, targets_test)\nkernel_svm_time = time() - kernel_svm_time\n\nlinear_svm_time = time()\nlinear_svm.fit(data_train, targets_train)\nlinear_svm_score = linear_svm.score(data_test, targets_test)\nlinear_svm_time = time() - linear_svm_time\n\nsample_sizes = 30 * np.arange(1, 10)\nfourier_scores = []\nnystroem_scores = []\nfourier_times = []\nnystroem_times = []\n\nfor D in sample_sizes:\n    fourier_approx_svm.set_params(feature_map__n_components=D)\n    nystroem_approx_svm.set_params(feature_map__n_components=D)\n    start = time()\n    nystroem_approx_svm.fit(data_train, targets_train)\n    nystroem_times.append(time() - start)\n\n    start = time()\n    fourier_approx_svm.fit(data_train, targets_train)\n    fourier_times.append(time() - start)\n\n    fourier_score = fourier_approx_svm.score(data_test, targets_test)\n    nystroem_score = nystroem_approx_svm.score(data_test, targets_test)\n    nystroem_scores.append(nystroem_score)\n    fourier_scores.append(fourier_score)\n\n# plot the results:\nplt.figure(figsize=(16, 4))\naccuracy = plt.subplot(121)\n# second y axis for timings\ntimescale = plt.subplot(122)\n\naccuracy.plot(sample_sizes, nystroem_scores, label=\"Nystroem approx. kernel\")\ntimescale.plot(sample_sizes, nystroem_times, \"--\", label=\"Nystroem approx. kernel\")\n\naccuracy.plot(sample_sizes, fourier_scores, label=\"Fourier approx. kernel\")\ntimescale.plot(sample_sizes, fourier_times, \"--\", label=\"Fourier approx. kernel\")\n\n# horizontal lines for exact rbf and linear kernels:\naccuracy.plot(\n    [sample_sizes[0], sample_sizes[-1]],\n    [linear_svm_score, linear_svm_score],\n    label=\"linear svm\",\n)\ntimescale.plot(\n    [sample_sizes[0], sample_sizes[-1]],\n    [linear_svm_time, linear_svm_time],\n    \"--\",\n    label=\"linear svm\",\n)\n\naccuracy.plot(\n    [sample_sizes[0], sample_sizes[-1]],\n    [kernel_svm_score, kernel_svm_score],\n    label=\"rbf svm\",\n)\ntimescale.plot(\n    [sample_sizes[0], sample_sizes[-1]],\n    [kernel_svm_time, kernel_svm_time],\n    \"--\",\n    label=\"rbf svm\",\n)\n\n# vertical line for dataset dimensionality = 64\naccuracy.plot([64, 64], [0.7, 1], label=\"n_features\")\n\n# legends and labels\naccuracy.set_title(\"Classification accuracy\")\ntimescale.set_title(\"Training times\")\naccuracy.set_xlim(sample_sizes[0], sample_sizes[-1])\naccuracy.set_xticks(())\naccuracy.set_ylim(np.min(fourier_scores), 1)\ntimescale.set_xlabel(\"Sampling steps = transformed feature dimension\")\naccuracy.set_ylabel(\"Classification accuracy\")\ntimescale.set_ylabel(\"Training time in seconds\")\naccuracy.legend(loc=\"best\")\ntimescale.legend(loc=\"best\")\nplt.tight_layout()\nplt.show()"
+"n_samples = len(digits.data)\ndata = digits.data / 16.0\ndata -= data.mean(axis=0)\n\n# We learn the digits on the first half of the digits\ndata_train, targets_train = (data[: n_samples // 2], digits.target[: n_samples // 2])\n\n\n# Now predict the value of the digit on the second half:\ndata_test, targets_test = (data[n_samples // 2 :], digits.target[n_samples // 2 :])\n# data_test = scaler.transform(data_test)\n\n# Create a classifier: a support vector classifier\nkernel_svm = svm.SVC(gamma=0.2)\nlinear_svm = svm.LinearSVC(dual=\"auto\")\n\n# create pipeline from kernel approximation\n# and linear svm\nfeature_map_fourier = RBFSampler(gamma=0.2, random_state=1)\nfeature_map_nystroem = Nystroem(gamma=0.2, random_state=1)\nfourier_approx_svm = pipeline.Pipeline(\n    [(\"feature_map\", feature_map_fourier), (\"svm\", svm.LinearSVC(dual=\"auto\"))]\n)\n\nnystroem_approx_svm = pipeline.Pipeline(\n    [(\"feature_map\", feature_map_nystroem), (\"svm\", svm.LinearSVC(dual=\"auto\"))]\n)\n\n# fit and predict using linear and kernel svm:\n\nkernel_svm_time = time()\nkernel_svm.fit(data_train, targets_train)\nkernel_svm_score = kernel_svm.score(data_test, targets_test)\nkernel_svm_time = time() - kernel_svm_time\n\nlinear_svm_time = time()\nlinear_svm.fit(data_train, targets_train)\nlinear_svm_score = linear_svm.score(data_test, targets_test)\nlinear_svm_time = time() - linear_svm_time\n\nsample_sizes = 30 * np.arange(1, 10)\nfourier_scores = []\nnystroem_scores = []\nfourier_times = []\nnystroem_times = []\n\nfor D in sample_sizes:\n    fourier_approx_svm.set_params(feature_map__n_components=D)\n    nystroem_approx_svm.set_params(feature_map__n_components=D)\n    start = time()\n    nystroem_approx_svm.fit(data_train, targets_train)\n    nystroem_times.append(time() - start)\n\n    start = time()\n    fourier_approx_svm.fit(data_train, targets_train)\n    fourier_times.append(time() - start)\n\n    fourier_score = fourier_approx_svm.score(data_test, targets_test)\n    nystroem_score = nystroem_approx_svm.score(data_test, targets_test)\n    nystroem_scores.append(nystroem_score)\n    fourier_scores.append(fourier_score)\n\n# plot the results:\nplt.figure(figsize=(16, 4))\naccuracy = plt.subplot(121)\n# second y axis for timings\ntimescale = plt.subplot(122)\n\naccuracy.plot(sample_sizes, nystroem_scores, label=\"Nystroem approx. kernel\")\ntimescale.plot(sample_sizes, nystroem_times, \"--\", label=\"Nystroem approx. kernel\")\n\naccuracy.plot(sample_sizes, fourier_scores, label=\"Fourier approx. kernel\")\ntimescale.plot(sample_sizes, fourier_times, \"--\", label=\"Fourier approx. kernel\")\n\n# horizontal lines for exact rbf and linear kernels:\naccuracy.plot(\n    [sample_sizes[0], sample_sizes[-1]],\n    [linear_svm_score, linear_svm_score],\n    label=\"linear svm\",\n)\ntimescale.plot(\n    [sample_sizes[0], sample_sizes[-1]],\n    [linear_svm_time, linear_svm_time],\n    \"--\",\n    label=\"linear svm\",\n)\n\naccuracy.plot(\n    [sample_sizes[0], sample_sizes[-1]],\n    [kernel_svm_score, kernel_svm_score],\n    label=\"rbf svm\",\n)\ntimescale.plot(\n    [sample_sizes[0], sample_sizes[-1]],\n    [kernel_svm_time, kernel_svm_time],\n    \"--\",\n    label=\"rbf svm\",\n)\n\n# vertical line for dataset dimensionality = 64\naccuracy.plot([64, 64], [0.7, 1], label=\"n_features\")\n\n# legends and labels\naccuracy.set_title(\"Classification accuracy\")\ntimescale.set_title(\"Training times\")\naccuracy.set_xlim(sample_sizes[0], sample_sizes[-1])\naccuracy.set_xticks(())\naccuracy.set_ylim(np.min(fourier_scores), 1)\ntimescale.set_xlabel(\"Sampling steps = transformed feature dimension\")\naccuracy.set_ylabel(\"Classification accuracy\")\ntimescale.set_ylabel(\"Training time in seconds\")\naccuracy.legend(loc=\"best\")\ntimescale.legend(loc=\"best\")\nplt.tight_layout()\nplt.show()"
 ]
 },
 {
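The parameter sweep in this cell leans on Pipeline parameter routing: set_params(feature_map__n_components=D) reaches n_components on the step named "feature_map" via the <step>__<param> double-underscore convention. A minimal illustration (toy objects, not from the example):

from sklearn.kernel_approximation import Nystroem
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC

pipe = Pipeline([("feature_map", Nystroem(gamma=0.2)), ("svm", LinearSVC(dual="auto"))])
pipe.set_params(feature_map__n_components=100)  # routes to Nystroem.n_components
print(pipe.get_params()["feature_map__n_components"])  # 100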

dev/_downloads/10bb40e21b74618cdeed618ff1eae595/plot_det.ipynb

+1 -1

@@ -40,7 +40,7 @@
 },
 "outputs": [],
 "source": [
-"from sklearn.ensemble import RandomForestClassifier\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.svm import LinearSVC\n\nclassifiers = {\n    \"Linear SVM\": make_pipeline(StandardScaler(), LinearSVC(C=0.025)),\n    \"Random Forest\": RandomForestClassifier(\n        max_depth=5, n_estimators=10, max_features=1\n    ),\n}"
+"from sklearn.ensemble import RandomForestClassifier\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.svm import LinearSVC\n\nclassifiers = {\n    \"Linear SVM\": make_pipeline(StandardScaler(), LinearSVC(C=0.025, dual=\"auto\")),\n    \"Random Forest\": RandomForestClassifier(\n        max_depth=5, n_estimators=10, max_features=1\n    ),\n}"
 ]
 },
 {

dev/_downloads/12a392e818ac5fa47dd91461855f3f77/plot_linearsvc_support_vectors.ipynb

+1 -1

@@ -15,7 +15,7 @@
 },
 "outputs": [],
 "source": [
-"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import make_blobs\nfrom sklearn.svm import LinearSVC\nfrom sklearn.inspection import DecisionBoundaryDisplay\n\nX, y = make_blobs(n_samples=40, centers=2, random_state=0)\n\nplt.figure(figsize=(10, 5))\nfor i, C in enumerate([1, 100]):\n    # \"hinge\" is the standard SVM loss\n    clf = LinearSVC(C=C, loss=\"hinge\", random_state=42).fit(X, y)\n    # obtain the support vectors through the decision function\n    decision_function = clf.decision_function(X)\n    # we can also calculate the decision function manually\n    # decision_function = np.dot(X, clf.coef_[0]) + clf.intercept_[0]\n    # The support vectors are the samples that lie within the margin\n    # boundaries, whose size is conventionally constrained to 1\n    support_vector_indices = np.where(np.abs(decision_function) <= 1 + 1e-15)[0]\n    support_vectors = X[support_vector_indices]\n\n    plt.subplot(1, 2, i + 1)\n    plt.scatter(X[:, 0], X[:, 1], c=y, s=30, cmap=plt.cm.Paired)\n    ax = plt.gca()\n    DecisionBoundaryDisplay.from_estimator(\n        clf,\n        X,\n        ax=ax,\n        grid_resolution=50,\n        plot_method=\"contour\",\n        colors=\"k\",\n        levels=[-1, 0, 1],\n        alpha=0.5,\n        linestyles=[\"--\", \"-\", \"--\"],\n    )\n    plt.scatter(\n        support_vectors[:, 0],\n        support_vectors[:, 1],\n        s=100,\n        linewidth=1,\n        facecolors=\"none\",\n        edgecolors=\"k\",\n    )\n    plt.title(\"C=\" + str(C))\nplt.tight_layout()\nplt.show()"
+"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import make_blobs\nfrom sklearn.svm import LinearSVC\nfrom sklearn.inspection import DecisionBoundaryDisplay\n\nX, y = make_blobs(n_samples=40, centers=2, random_state=0)\n\nplt.figure(figsize=(10, 5))\nfor i, C in enumerate([1, 100]):\n    # \"hinge\" is the standard SVM loss\n    clf = LinearSVC(C=C, loss=\"hinge\", random_state=42, dual=\"auto\").fit(X, y)\n    # obtain the support vectors through the decision function\n    decision_function = clf.decision_function(X)\n    # we can also calculate the decision function manually\n    # decision_function = np.dot(X, clf.coef_[0]) + clf.intercept_[0]\n    # The support vectors are the samples that lie within the margin\n    # boundaries, whose size is conventionally constrained to 1\n    support_vector_indices = np.where(np.abs(decision_function) <= 1 + 1e-15)[0]\n    support_vectors = X[support_vector_indices]\n\n    plt.subplot(1, 2, i + 1)\n    plt.scatter(X[:, 0], X[:, 1], c=y, s=30, cmap=plt.cm.Paired)\n    ax = plt.gca()\n    DecisionBoundaryDisplay.from_estimator(\n        clf,\n        X,\n        ax=ax,\n        grid_resolution=50,\n        plot_method=\"contour\",\n        colors=\"k\",\n        levels=[-1, 0, 1],\n        alpha=0.5,\n        linestyles=[\"--\", \"-\", \"--\"],\n    )\n    plt.scatter(\n        support_vectors[:, 0],\n        support_vectors[:, 1],\n        s=100,\n        linewidth=1,\n        facecolors=\"none\",\n        edgecolors=\"k\",\n    )\n    plt.title(\"C=\" + str(C))\nplt.tight_layout()\nplt.show()"
 ]
 }
 ],

dev/_downloads/4186bc506946013950b224b06f827118/plot_iris_svc.py

+1 -1

@@ -50,7 +50,7 @@
 C = 1.0  # SVM regularization parameter
 models = (
     svm.SVC(kernel="linear", C=C),
-    svm.LinearSVC(C=C, max_iter=10000),
+    svm.LinearSVC(C=C, max_iter=10000, dual="auto"),
     svm.SVC(kernel="rbf", gamma=0.7, C=C),
     svm.SVC(kernel="poly", degree=3, gamma="auto", C=C),
 )

dev/_downloads/51e6f272e94e3b63cfd48c4b41fbaa10/plot_feature_selection_pipeline.ipynb

+1 -1

@@ -40,7 +40,7 @@
 },
 "outputs": [],
 "source": [
-"from sklearn.feature_selection import SelectKBest, f_classif\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.svm import LinearSVC\n\nanova_filter = SelectKBest(f_classif, k=3)\nclf = LinearSVC()\nanova_svm = make_pipeline(anova_filter, clf)\nanova_svm.fit(X_train, y_train)"
+"from sklearn.feature_selection import SelectKBest, f_classif\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.svm import LinearSVC\n\nanova_filter = SelectKBest(f_classif, k=3)\nclf = LinearSVC(dual=\"auto\")\nanova_svm = make_pipeline(anova_filter, clf)\nanova_svm.fit(X_train, y_train)"
 ]
 },
 {

dev/_downloads/5a7e586367163444711012a4c5214817/plot_feature_selection_pipeline.py

+1 -1

@@ -46,7 +46,7 @@
 from sklearn.svm import LinearSVC

 anova_filter = SelectKBest(f_classif, k=3)
-clf = LinearSVC()
+clf = LinearSVC(dual="auto")
 anova_svm = make_pipeline(anova_filter, clf)
 anova_svm.fit(X_train, y_train)

dev/_downloads/62397dcd82eb2478e27036ac96fe2ab9/plot_feature_selection.py

+4 -2

@@ -76,7 +76,7 @@
 from sklearn.preprocessing import MinMaxScaler
 from sklearn.svm import LinearSVC

-clf = make_pipeline(MinMaxScaler(), LinearSVC())
+clf = make_pipeline(MinMaxScaler(), LinearSVC(dual="auto"))
 clf.fit(X_train, y_train)
 print(
     "Classification accuracy without selecting features: {:.3f}".format(
@@ -89,7 +89,9 @@

 # %%
 # After univariate feature selection
-clf_selected = make_pipeline(SelectKBest(f_classif, k=4), MinMaxScaler(), LinearSVC())
+clf_selected = make_pipeline(
+    SelectKBest(f_classif, k=4), MinMaxScaler(), LinearSVC(dual="auto")
+)
 clf_selected.fit(X_train, y_train)
 print(
     "Classification accuracy after univariate feature selection: {:.3f}".format(

dev/_downloads/67703ae8c65716668dd87c31a24a069b/plot_det.py

+1 -1

@@ -66,7 +66,7 @@
 from sklearn.svm import LinearSVC

 classifiers = {
-    "Linear SVM": make_pipeline(StandardScaler(), LinearSVC(C=0.025)),
+    "Linear SVM": make_pipeline(StandardScaler(), LinearSVC(C=0.025, dual="auto")),
     "Random Forest": RandomForestClassifier(
         max_depth=5, n_estimators=10, max_features=1
     ),

dev/_downloads/6d4f620ec6653356eb970c2a6ed62081/plot_calibration_curve.ipynb

+1 -1

@@ -109,7 +109,7 @@
 },
 "outputs": [],
 "source": [
-"lr = LogisticRegression(C=1.0)\nsvc = NaivelyCalibratedLinearSVC(max_iter=10_000)\nsvc_isotonic = CalibratedClassifierCV(svc, cv=2, method=\"isotonic\")\nsvc_sigmoid = CalibratedClassifierCV(svc, cv=2, method=\"sigmoid\")\n\nclf_list = [\n    (lr, \"Logistic\"),\n    (svc, \"SVC\"),\n    (svc_isotonic, \"SVC + Isotonic\"),\n    (svc_sigmoid, \"SVC + Sigmoid\"),\n]"
+"lr = LogisticRegression(C=1.0)\nsvc = NaivelyCalibratedLinearSVC(max_iter=10_000, dual=\"auto\")\nsvc_isotonic = CalibratedClassifierCV(svc, cv=2, method=\"isotonic\")\nsvc_sigmoid = CalibratedClassifierCV(svc, cv=2, method=\"sigmoid\")\n\nclf_list = [\n    (lr, \"Logistic\"),\n    (svc, \"SVC\"),\n    (svc_isotonic, \"SVC + Isotonic\"),\n    (svc_sigmoid, \"SVC + Sigmoid\"),\n]"
 ]
 },
 {
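For context, NaivelyCalibratedLinearSVC is a helper defined earlier in this example rather than a scikit-learn estimator: it subclasses LinearSVC and bolts on a deliberately naive predict_proba that min-max scales decision_function outputs into [0, 1]. Because it does not override __init__, keyword arguments such as dual="auto" pass straight through to LinearSVC. A condensed sketch of that pattern (close to, but not verbatim from, the example):

import numpy as np
from sklearn.svm import LinearSVC

class NaivelyCalibratedLinearSVC(LinearSVC):
    """LinearSVC with a naive predict_proba: min-max scaled decision scores."""

    def fit(self, X, y):
        super().fit(X, y)
        df = self.decision_function(X)
        self.df_min_ = df.min()
        self.df_max_ = df.max()
        return self

    def predict_proba(self, X):
        # Rescale scores to [0, 1]; intentionally NOT a proper calibration.
        df = self.decision_function(X)
        proba_pos = np.clip((df - self.df_min_) / (self.df_max_ - self.df_min_), 0, 1)
        return np.c_[1 - proba_pos, proba_pos]

# dual="auto" reaches LinearSVC.__init__ via inheritance:
svc = NaivelyCalibratedLinearSVC(max_iter=10_000, dual="auto")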

dev/_downloads/74caedf3eb449b80f3f00e66c1c576bd/plot_discretization_classification.py

+2 -2

@@ -70,7 +70,7 @@ def get_name(estimator):
         {"logisticregression__C": np.logspace(-1, 1, 3)},
     ),
     (
-        make_pipeline(StandardScaler(), LinearSVC(random_state=0)),
+        make_pipeline(StandardScaler(), LinearSVC(random_state=0, dual="auto")),
         {"linearsvc__C": np.logspace(-1, 1, 3)},
     ),
     (
@@ -88,7 +88,7 @@ def get_name(estimator):
         make_pipeline(
             StandardScaler(),
             KBinsDiscretizer(encode="onehot"),
-            LinearSVC(random_state=0),
+            LinearSVC(random_state=0, dual="auto"),
         ),
         {
             "kbinsdiscretizer__n_bins": np.arange(5, 8),

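The search grids paired with these pipelines ("linearsvc__C", "kbinsdiscretizer__n_bins") work because make_pipeline names each step after its lowercased class name, and grid keys are prefixed with that step name. A small illustration (toy grid, not from the example):

import numpy as np
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC

pipe = make_pipeline(StandardScaler(), LinearSVC(random_state=0, dual="auto"))
print(pipe.steps[-1][0])  # "linearsvc" -- the prefix used in grid keys
search = GridSearchCV(pipe, {"linearsvc__C": np.logspace(-1, 1, 3)})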
dev/_downloads/757941223692da355c1f7de747af856d/plot_compare_calibration.ipynb

+1 -1

@@ -51,7 +51,7 @@
 },
 "outputs": [],
 "source": [
-"from sklearn.calibration import CalibrationDisplay\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.naive_bayes import GaussianNB\n\n# Create classifiers\nlr = LogisticRegression()\ngnb = GaussianNB()\nsvc = NaivelyCalibratedLinearSVC(C=1.0)\nrfc = RandomForestClassifier()\n\nclf_list = [\n    (lr, \"Logistic\"),\n    (gnb, \"Naive Bayes\"),\n    (svc, \"SVC\"),\n    (rfc, \"Random forest\"),\n]"
+"from sklearn.calibration import CalibrationDisplay\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.naive_bayes import GaussianNB\n\n# Create classifiers\nlr = LogisticRegression()\ngnb = GaussianNB()\nsvc = NaivelyCalibratedLinearSVC(C=1.0, dual=\"auto\")\nrfc = RandomForestClassifier()\n\nclf_list = [\n    (lr, \"Logistic\"),\n    (gnb, \"Naive Bayes\"),\n    (svc, \"SVC\"),\n    (rfc, \"Random forest\"),\n]"
 ]
 },
 {

dev/_downloads/764d061a261a2e06ad21ec9133361b2d/plot_precision_recall.ipynb

+2 -2

@@ -40,7 +40,7 @@
 },
 "outputs": [],
 "source": [
-"from sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import LinearSVC\n\nclassifier = make_pipeline(StandardScaler(), LinearSVC(random_state=random_state))\nclassifier.fit(X_train, y_train)"
+"from sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import LinearSVC\n\nclassifier = make_pipeline(\n    StandardScaler(), LinearSVC(random_state=random_state, dual=\"auto\")\n)\nclassifier.fit(X_train, y_train)"
 ]
 },
 {
@@ -112,7 +112,7 @@
 },
 "outputs": [],
 "source": [
-"from sklearn.multiclass import OneVsRestClassifier\n\nclassifier = OneVsRestClassifier(\n    make_pipeline(StandardScaler(), LinearSVC(random_state=random_state))\n)\nclassifier.fit(X_train, Y_train)\ny_score = classifier.decision_function(X_test)"
+"from sklearn.multiclass import OneVsRestClassifier\n\nclassifier = OneVsRestClassifier(\n    make_pipeline(StandardScaler(), LinearSVC(random_state=random_state, dual=\"auto\"))\n)\nclassifier.fit(X_train, Y_train)\ny_score = classifier.decision_function(X_test)"
 ]
 },
 {

dev/_downloads/85db957603c93bd3e0a4265ea6565b13/plot_calibration_curve.py

+1 -1

@@ -222,7 +222,7 @@ def predict_proba(self, X):
 # %%

 lr = LogisticRegression(C=1.0)
-svc = NaivelyCalibratedLinearSVC(max_iter=10_000)
+svc = NaivelyCalibratedLinearSVC(max_iter=10_000, dual="auto")
 svc_isotonic = CalibratedClassifierCV(svc, cv=2, method="isotonic")
 svc_sigmoid = CalibratedClassifierCV(svc, cv=2, method="sigmoid")

dev/_downloads/98161c8b335acb98de356229c1005819/plot_precision_recall.py

+4 -2

@@ -123,7 +123,9 @@
 from sklearn.preprocessing import StandardScaler
 from sklearn.svm import LinearSVC

-classifier = make_pipeline(StandardScaler(), LinearSVC(random_state=random_state))
+classifier = make_pipeline(
+    StandardScaler(), LinearSVC(random_state=random_state, dual="auto")
+)
 classifier.fit(X_train, y_train)

 # %%
@@ -187,7 +189,7 @@
 from sklearn.multiclass import OneVsRestClassifier

 classifier = OneVsRestClassifier(
-    make_pipeline(StandardScaler(), LinearSVC(random_state=random_state))
+    make_pipeline(StandardScaler(), LinearSVC(random_state=random_state, dual="auto"))
 )
 classifier.fit(X_train, Y_train)
 y_score = classifier.decision_function(X_test)
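In the multi-label half of this example, OneVsRestClassifier fits one binary LinearSVC pipeline per class, and its decision_function returns one score column per class, which is what the per-class precision-recall curves consume downstream. A minimal sketch of the resulting shapes (synthetic data, not the example's dataset):

from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC

X, Y = make_multilabel_classification(n_classes=3, random_state=0)
clf = OneVsRestClassifier(
    make_pipeline(StandardScaler(), LinearSVC(random_state=0, dual="auto"))
)
clf.fit(X, Y)
print(clf.decision_function(X).shape)  # (n_samples, 3): one column per class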

dev/_downloads/a126bc8be59b9a8c240264570cda0bcb/plot_compare_calibration.py

+1 -1

@@ -92,7 +92,7 @@ def predict_proba(self, X):
 # Create classifiers
 lr = LogisticRegression()
 gnb = GaussianNB()
-svc = NaivelyCalibratedLinearSVC(C=1.0)
+svc = NaivelyCalibratedLinearSVC(C=1.0, dual="auto")
 rfc = RandomForestClassifier()

 clf_list = [
