
Commit 9985495

Pushing the docs to 1.3/ for branch: 1.3.X, commit 7f9bad99d6e0a3e8ddf92a7e5561245224dab102
1 parent e6c982b commit 9985495


3,164 files changed, +23422 / -44123 lines changed


Diff for: 1.3/.buildinfo

+1-1
@@ -1,4 +1,4 @@
 # Sphinx build info version 1
 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: 4f5caed29f0e06a89d4de5cb2447e013
+config: afc023663d94539ff18489a9370c996e
 tags: 645f666f9bcd5a90fca523b33c5a78b7

Diff for: 1.3/_downloads/006fc185672e58b056a5c134db26935c/plot_coin_segmentation.ipynb

+1-1
@@ -15,7 +15,7 @@
 },
 "outputs": [],
 "source": [
Removed:
"# Author: Gael Varoquaux <[email protected]>\n# Brian Cheung\n# Andrew Knyazev <[email protected]>\n# License: BSD 3 clause\n\nimport time\n\nimport numpy as np\nfrom scipy.ndimage import gaussian_filter\nimport matplotlib.pyplot as plt\nfrom skimage.data import coins\nfrom skimage.transform import rescale\n\nfrom sklearn.feature_extraction import image\nfrom sklearn.cluster import spectral_clustering\n\n\n# load the coins as a numpy array\norig_coins = coins()\n\n# Resize it to 20% of the original size to speed up the processing\n# Applying a Gaussian filter for smoothing prior to down-scaling\n# reduces aliasing artifacts.\nsmoothened_coins = gaussian_filter(orig_coins, sigma=2)\nrescaled_coins = rescale(smoothened_coins, 0.2, mode=\"reflect\", anti_aliasing=False)\n\n# Convert the image into a graph with the value of the gradient on the\n# edges.\ngraph = image.img_to_graph(rescaled_coins)\n\n# Take a decreasing function of the gradient: an exponential\n# The smaller beta is, the more independent the segmentation is of the\n# actual image. For beta=1, the segmentation is close to a voronoi\nbeta = 10\neps = 1e-6\ngraph.data = np.exp(-beta * graph.data / graph.data.std()) + eps\n\n# The number of segmented regions to display needs to be chosen manually.\n# The current version of 'spectral_clustering' does not support determining\n# the number of good quality clusters automatically.\nn_regions = 26"
Added:
"# Author: Gael Varoquaux <[email protected]>\n# Brian Cheung\n# Andrew Knyazev <[email protected]>\n# License: BSD 3 clause\n\nimport time\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.ndimage import gaussian_filter\nfrom skimage.data import coins\nfrom skimage.transform import rescale\n\nfrom sklearn.cluster import spectral_clustering\nfrom sklearn.feature_extraction import image\n\n# load the coins as a numpy array\norig_coins = coins()\n\n# Resize it to 20% of the original size to speed up the processing\n# Applying a Gaussian filter for smoothing prior to down-scaling\n# reduces aliasing artifacts.\nsmoothened_coins = gaussian_filter(orig_coins, sigma=2)\nrescaled_coins = rescale(smoothened_coins, 0.2, mode=\"reflect\", anti_aliasing=False)\n\n# Convert the image into a graph with the value of the gradient on the\n# edges.\ngraph = image.img_to_graph(rescaled_coins)\n\n# Take a decreasing function of the gradient: an exponential\n# The smaller beta is, the more independent the segmentation is of the\n# actual image. For beta=1, the segmentation is close to a voronoi\nbeta = 10\neps = 1e-6\ngraph.data = np.exp(-beta * graph.data / graph.data.std()) + eps\n\n# The number of segmented regions to display needs to be chosen manually.\n# The current version of 'spectral_clustering' does not support determining\n# the number of good quality clusters automatically.\nn_regions = 26"
 ]
 },
 {

Diff for: 1.3/_downloads/00ae629d652473137a3905a5e08ea815/plot_iris_dtc.py

+2-3
@@ -23,13 +23,12 @@
 
 # %%
 # Display the decision functions of trees trained on all pairs of features.
-import numpy as np
 import matplotlib.pyplot as plt
+import numpy as np
 
 from sklearn.datasets import load_iris
-from sklearn.tree import DecisionTreeClassifier
 from sklearn.inspection import DecisionBoundaryDisplay
-
+from sklearn.tree import DecisionTreeClassifier
 
 # Parameters
 n_classes = 3
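
The hunk above, like the other example-file hunks in this commit, regroups and alphabetizes imports: third-party packages (matplotlib, numpy, scipy, skimage) form one block, with the sklearn imports in a separate block below. A minimal sketch of reproducing that ordering with isort's Python API; the commit does not name the tool or configuration actually used, so treating sklearn as the first-party package here is an assumption:

import isort

# Illustrative "before" imports, mirroring the old version of plot_iris_dtc.py above.
before = (
    "import numpy as np\n"
    "import matplotlib.pyplot as plt\n"
    "\n"
    "from sklearn.datasets import load_iris\n"
    "from sklearn.tree import DecisionTreeClassifier\n"
    "from sklearn.inspection import DecisionBoundaryDisplay\n"
)

# Assumption: register sklearn as first-party so it gets its own block,
# sorted after the alphabetized third-party block.
print(isort.code(before, known_first_party=["sklearn"]))

With these settings the output should match the "+" lines of the hunk above (matplotlib before numpy, then the sklearn imports alphabetized by module path), but that correspondence is inferred from the diffs rather than stated in the commit.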

Diff for: 1.3/_downloads/010337852815f8103ac6cca38a812b3c/plot_roc_crossval.py

+2-2
@@ -41,6 +41,7 @@
 # (`class_id=0`).
 
 import numpy as np
+
 from sklearn.datasets import load_iris
 
 iris = load_iris()
@@ -66,8 +67,7 @@
 import matplotlib.pyplot as plt
 
 from sklearn import svm
-from sklearn.metrics import auc
-from sklearn.metrics import RocCurveDisplay
+from sklearn.metrics import RocCurveDisplay, auc
 from sklearn.model_selection import StratifiedKFold
 
 n_splits = 6

Diff for: 1.3/_downloads/01fdc7c95204e4a420de7cd297711693/plot_feature_union.py

+3-3
@@ -20,12 +20,12 @@
 #
 # License: BSD 3 clause
 
-from sklearn.pipeline import Pipeline, FeatureUnion
-from sklearn.model_selection import GridSearchCV
-from sklearn.svm import SVC
 from sklearn.datasets import load_iris
 from sklearn.decomposition import PCA
 from sklearn.feature_selection import SelectKBest
+from sklearn.model_selection import GridSearchCV
+from sklearn.pipeline import FeatureUnion, Pipeline
+from sklearn.svm import SVC
 
 iris = load_iris()
 

Diff for: 1.3/_downloads/02a1306a494b46cc56c930ceec6e8c4a/plot_species_kde.py

+2-1
@@ -40,8 +40,9 @@
 #
 # License: BSD 3 clause
 
-import numpy as np
 import matplotlib.pyplot as plt
+import numpy as np
+
 from sklearn.datasets import fetch_species_distributions
 from sklearn.neighbors import KernelDensity
 

Diff for: 1.3/_downloads/02a7bbce3c39c70d62d80e875968e5c6/plot_digits_kde_sampling.py

+2-2
@@ -11,13 +11,13 @@
 
 """
 
-import numpy as np
 import matplotlib.pyplot as plt
+import numpy as np
 
 from sklearn.datasets import load_digits
-from sklearn.neighbors import KernelDensity
 from sklearn.decomposition import PCA
 from sklearn.model_selection import GridSearchCV
+from sklearn.neighbors import KernelDensity
 
 # load the data
 digits = load_digits()

Diff for: 1.3/_downloads/02d88d76c60b7397c8c6e221b31568dd/plot_grid_search_refit_callable.py

+1-1
@@ -20,8 +20,8 @@
 
 # Author: Wenhao Zhang <[email protected]>
 
-import numpy as np
 import matplotlib.pyplot as plt
+import numpy as np
 
 from sklearn.datasets import load_digits
 from sklearn.decomposition import PCA

Diff for: 1.3/_downloads/02f111fb3dd79805b161e14c564184fc/plot_sgd_comparison.ipynb

+1-1
@@ -15,7 +15,7 @@
 },
 "outputs": [],
 "source": [
Removed:
"# Author: Rob Zinkov <rob at zinkov dot com>\n# License: BSD 3 clause\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import SGDClassifier, Perceptron\nfrom sklearn.linear_model import PassiveAggressiveClassifier\nfrom sklearn.linear_model import LogisticRegression\n\nheldout = [0.95, 0.90, 0.75, 0.50, 0.01]\n# Number of rounds to fit and evaluate an estimator.\nrounds = 10\nX, y = datasets.load_digits(return_X_y=True)\n\nclassifiers = [\n (\"SGD\", SGDClassifier(max_iter=110)),\n (\"ASGD\", SGDClassifier(max_iter=110, average=True)),\n (\"Perceptron\", Perceptron(max_iter=110)),\n (\n \"Passive-Aggressive I\",\n PassiveAggressiveClassifier(max_iter=110, loss=\"hinge\", C=1.0, tol=1e-4),\n ),\n (\n \"Passive-Aggressive II\",\n PassiveAggressiveClassifier(\n max_iter=110, loss=\"squared_hinge\", C=1.0, tol=1e-4\n ),\n ),\n (\n \"SAG\",\n LogisticRegression(max_iter=110, solver=\"sag\", tol=1e-1, C=1.0e4 / X.shape[0]),\n ),\n]\n\nxx = 1.0 - np.array(heldout)\n\nfor name, clf in classifiers:\n print(\"training %s\" % name)\n rng = np.random.RandomState(42)\n yy = []\n for i in heldout:\n yy_ = []\n for r in range(rounds):\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=i, random_state=rng\n )\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n yy_.append(1 - np.mean(y_pred == y_test))\n yy.append(np.mean(yy_))\n plt.plot(xx, yy, label=name)\n\nplt.legend(loc=\"upper right\")\nplt.xlabel(\"Proportion train\")\nplt.ylabel(\"Test Error Rate\")\nplt.show()"
Added:
"# Author: Rob Zinkov <rob at zinkov dot com>\n# License: BSD 3 clause\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom sklearn import datasets\nfrom sklearn.linear_model import (\n LogisticRegression,\n PassiveAggressiveClassifier,\n Perceptron,\n SGDClassifier,\n)\nfrom sklearn.model_selection import train_test_split\n\nheldout = [0.95, 0.90, 0.75, 0.50, 0.01]\n# Number of rounds to fit and evaluate an estimator.\nrounds = 10\nX, y = datasets.load_digits(return_X_y=True)\n\nclassifiers = [\n (\"SGD\", SGDClassifier(max_iter=110)),\n (\"ASGD\", SGDClassifier(max_iter=110, average=True)),\n (\"Perceptron\", Perceptron(max_iter=110)),\n (\n \"Passive-Aggressive I\",\n PassiveAggressiveClassifier(max_iter=110, loss=\"hinge\", C=1.0, tol=1e-4),\n ),\n (\n \"Passive-Aggressive II\",\n PassiveAggressiveClassifier(\n max_iter=110, loss=\"squared_hinge\", C=1.0, tol=1e-4\n ),\n ),\n (\n \"SAG\",\n LogisticRegression(max_iter=110, solver=\"sag\", tol=1e-1, C=1.0e4 / X.shape[0]),\n ),\n]\n\nxx = 1.0 - np.array(heldout)\n\nfor name, clf in classifiers:\n print(\"training %s\" % name)\n rng = np.random.RandomState(42)\n yy = []\n for i in heldout:\n yy_ = []\n for r in range(rounds):\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=i, random_state=rng\n )\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n yy_.append(1 - np.mean(y_pred == y_test))\n yy.append(np.mean(yy_))\n plt.plot(xx, yy, label=name)\n\nplt.legend(loc=\"upper right\")\nplt.xlabel(\"Proportion train\")\nplt.ylabel(\"Test Error Rate\")\nplt.show()"
 ]
 }
],

Diff for: 1.3/_downloads/036b9372e2e7802453cbb994da7a6786/plot_linearsvc_support_vectors.py

+3-2
@@ -9,11 +9,12 @@
 
 """
 
-import numpy as np
 import matplotlib.pyplot as plt
+import numpy as np
+
 from sklearn.datasets import make_blobs
-from sklearn.svm import LinearSVC
 from sklearn.inspection import DecisionBoundaryDisplay
+from sklearn.svm import LinearSVC
 
 X, y = make_blobs(n_samples=40, centers=2, random_state=0)
 

Diff for: 1.3/_downloads/0486bf9e537e44cedd2a236d034bcd90/plot_pcr_vs_pls.ipynb

+2-2
@@ -22,7 +22,7 @@
 },
 "outputs": [],
 "source": [
Removed:
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.decomposition import PCA\n\nrng = np.random.RandomState(0)\nn_samples = 500\ncov = [[3, 3], [3, 4]]\nX = rng.multivariate_normal(mean=[0, 0], cov=cov, size=n_samples)\npca = PCA(n_components=2).fit(X)\n\n\nplt.scatter(X[:, 0], X[:, 1], alpha=0.3, label=\"samples\")\nfor i, (comp, var) in enumerate(zip(pca.components_, pca.explained_variance_)):\n comp = comp * var # scale component by its variance explanation power\n plt.plot(\n [0, comp[0]],\n [0, comp[1]],\n label=f\"Component {i}\",\n linewidth=5,\n color=f\"C{i + 2}\",\n )\nplt.gca().set(\n aspect=\"equal\",\n title=\"2-dimensional dataset with principal components\",\n xlabel=\"first feature\",\n ylabel=\"second feature\",\n)\nplt.legend()\nplt.show()"
Added:
"import matplotlib.pyplot as plt\nimport numpy as np\n\nfrom sklearn.decomposition import PCA\n\nrng = np.random.RandomState(0)\nn_samples = 500\ncov = [[3, 3], [3, 4]]\nX = rng.multivariate_normal(mean=[0, 0], cov=cov, size=n_samples)\npca = PCA(n_components=2).fit(X)\n\n\nplt.scatter(X[:, 0], X[:, 1], alpha=0.3, label=\"samples\")\nfor i, (comp, var) in enumerate(zip(pca.components_, pca.explained_variance_)):\n comp = comp * var # scale component by its variance explanation power\n plt.plot(\n [0, comp[0]],\n [0, comp[1]],\n label=f\"Component {i}\",\n linewidth=5,\n color=f\"C{i + 2}\",\n )\nplt.gca().set(\n aspect=\"equal\",\n title=\"2-dimensional dataset with principal components\",\n xlabel=\"first feature\",\n ylabel=\"second feature\",\n)\nplt.legend()\nplt.show()"
 ]
 },
 {
@@ -58,7 +58,7 @@
 },
 "outputs": [],
 "source": [
Removed:
"from sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA\nfrom sklearn.cross_decomposition import PLSRegression\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=rng)\n\npcr = make_pipeline(StandardScaler(), PCA(n_components=1), LinearRegression())\npcr.fit(X_train, y_train)\npca = pcr.named_steps[\"pca\"] # retrieve the PCA step of the pipeline\n\npls = PLSRegression(n_components=1)\npls.fit(X_train, y_train)\n\nfig, axes = plt.subplots(1, 2, figsize=(10, 3))\naxes[0].scatter(pca.transform(X_test), y_test, alpha=0.3, label=\"ground truth\")\naxes[0].scatter(\n pca.transform(X_test), pcr.predict(X_test), alpha=0.3, label=\"predictions\"\n)\naxes[0].set(\n xlabel=\"Projected data onto first PCA component\", ylabel=\"y\", title=\"PCR / PCA\"\n)\naxes[0].legend()\naxes[1].scatter(pls.transform(X_test), y_test, alpha=0.3, label=\"ground truth\")\naxes[1].scatter(\n pls.transform(X_test), pls.predict(X_test), alpha=0.3, label=\"predictions\"\n)\naxes[1].set(xlabel=\"Projected data onto first PLS component\", ylabel=\"y\", title=\"PLS\")\naxes[1].legend()\nplt.tight_layout()\nplt.show()"
Added:
"from sklearn.cross_decomposition import PLSRegression\nfrom sklearn.decomposition import PCA\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=rng)\n\npcr = make_pipeline(StandardScaler(), PCA(n_components=1), LinearRegression())\npcr.fit(X_train, y_train)\npca = pcr.named_steps[\"pca\"] # retrieve the PCA step of the pipeline\n\npls = PLSRegression(n_components=1)\npls.fit(X_train, y_train)\n\nfig, axes = plt.subplots(1, 2, figsize=(10, 3))\naxes[0].scatter(pca.transform(X_test), y_test, alpha=0.3, label=\"ground truth\")\naxes[0].scatter(\n pca.transform(X_test), pcr.predict(X_test), alpha=0.3, label=\"predictions\"\n)\naxes[0].set(\n xlabel=\"Projected data onto first PCA component\", ylabel=\"y\", title=\"PCR / PCA\"\n)\naxes[0].legend()\naxes[1].scatter(pls.transform(X_test), y_test, alpha=0.3, label=\"ground truth\")\naxes[1].scatter(\n pls.transform(X_test), pls.predict(X_test), alpha=0.3, label=\"predictions\"\n)\naxes[1].set(xlabel=\"Projected data onto first PLS component\", ylabel=\"y\", title=\"PLS\")\naxes[1].legend()\nplt.tight_layout()\nplt.show()"
 ]
 },
 {

Diff for: 1.3/_downloads/055e50ba9fa8c7dcf45dc8f1f32a0243/plot_nnls.py

+2-1
@@ -9,8 +9,9 @@
 
 """
 
-import numpy as np
 import matplotlib.pyplot as plt
+import numpy as np
+
 from sklearn.metrics import r2_score
 
 # %%

Diff for: 1.3/_downloads/055e8313e28f2f3b5fd508054dfe5fe0/plot_roc_crossval.ipynb

+2-2
@@ -22,7 +22,7 @@
 },
 "outputs": [],
 "source": [
Removed:
"import numpy as np\nfrom sklearn.datasets import load_iris\n\niris = load_iris()\ntarget_names = iris.target_names\nX, y = iris.data, iris.target\nX, y = X[y != 2], y[y != 2]\nn_samples, n_features = X.shape"
Added:
"import numpy as np\n\nfrom sklearn.datasets import load_iris\n\niris = load_iris()\ntarget_names = iris.target_names\nX, y = iris.data, iris.target\nX, y = X[y != 2], y[y != 2]\nn_samples, n_features = X.shape"
 ]
 },
 {
@@ -58,7 +58,7 @@
 },
 "outputs": [],
 "source": [
Removed:
"import matplotlib.pyplot as plt\n\nfrom sklearn import svm\nfrom sklearn.metrics import auc\nfrom sklearn.metrics import RocCurveDisplay\nfrom sklearn.model_selection import StratifiedKFold\n\nn_splits = 6\ncv = StratifiedKFold(n_splits=n_splits)\nclassifier = svm.SVC(kernel=\"linear\", probability=True, random_state=random_state)\n\ntprs = []\naucs = []\nmean_fpr = np.linspace(0, 1, 100)\n\nfig, ax = plt.subplots(figsize=(6, 6))\nfor fold, (train, test) in enumerate(cv.split(X, y)):\n classifier.fit(X[train], y[train])\n viz = RocCurveDisplay.from_estimator(\n classifier,\n X[test],\n y[test],\n name=f\"ROC fold {fold}\",\n alpha=0.3,\n lw=1,\n ax=ax,\n plot_chance_level=(fold == n_splits - 1),\n )\n interp_tpr = np.interp(mean_fpr, viz.fpr, viz.tpr)\n interp_tpr[0] = 0.0\n tprs.append(interp_tpr)\n aucs.append(viz.roc_auc)\n\nmean_tpr = np.mean(tprs, axis=0)\nmean_tpr[-1] = 1.0\nmean_auc = auc(mean_fpr, mean_tpr)\nstd_auc = np.std(aucs)\nax.plot(\n mean_fpr,\n mean_tpr,\n color=\"b\",\n label=r\"Mean ROC (AUC = %0.2f $\\pm$ %0.2f)\" % (mean_auc, std_auc),\n lw=2,\n alpha=0.8,\n)\n\nstd_tpr = np.std(tprs, axis=0)\ntprs_upper = np.minimum(mean_tpr + std_tpr, 1)\ntprs_lower = np.maximum(mean_tpr - std_tpr, 0)\nax.fill_between(\n mean_fpr,\n tprs_lower,\n tprs_upper,\n color=\"grey\",\n alpha=0.2,\n label=r\"$\\pm$ 1 std. dev.\",\n)\n\nax.set(\n xlim=[-0.05, 1.05],\n ylim=[-0.05, 1.05],\n xlabel=\"False Positive Rate\",\n ylabel=\"True Positive Rate\",\n title=f\"Mean ROC curve with variability\\n(Positive label '{target_names[1]}')\",\n)\nax.axis(\"square\")\nax.legend(loc=\"lower right\")\nplt.show()"
Added:
"import matplotlib.pyplot as plt\n\nfrom sklearn import svm\nfrom sklearn.metrics import RocCurveDisplay, auc\nfrom sklearn.model_selection import StratifiedKFold\n\nn_splits = 6\ncv = StratifiedKFold(n_splits=n_splits)\nclassifier = svm.SVC(kernel=\"linear\", probability=True, random_state=random_state)\n\ntprs = []\naucs = []\nmean_fpr = np.linspace(0, 1, 100)\n\nfig, ax = plt.subplots(figsize=(6, 6))\nfor fold, (train, test) in enumerate(cv.split(X, y)):\n classifier.fit(X[train], y[train])\n viz = RocCurveDisplay.from_estimator(\n classifier,\n X[test],\n y[test],\n name=f\"ROC fold {fold}\",\n alpha=0.3,\n lw=1,\n ax=ax,\n plot_chance_level=(fold == n_splits - 1),\n )\n interp_tpr = np.interp(mean_fpr, viz.fpr, viz.tpr)\n interp_tpr[0] = 0.0\n tprs.append(interp_tpr)\n aucs.append(viz.roc_auc)\n\nmean_tpr = np.mean(tprs, axis=0)\nmean_tpr[-1] = 1.0\nmean_auc = auc(mean_fpr, mean_tpr)\nstd_auc = np.std(aucs)\nax.plot(\n mean_fpr,\n mean_tpr,\n color=\"b\",\n label=r\"Mean ROC (AUC = %0.2f $\\pm$ %0.2f)\" % (mean_auc, std_auc),\n lw=2,\n alpha=0.8,\n)\n\nstd_tpr = np.std(tprs, axis=0)\ntprs_upper = np.minimum(mean_tpr + std_tpr, 1)\ntprs_lower = np.maximum(mean_tpr - std_tpr, 0)\nax.fill_between(\n mean_fpr,\n tprs_lower,\n tprs_upper,\n color=\"grey\",\n alpha=0.2,\n label=r\"$\\pm$ 1 std. dev.\",\n)\n\nax.set(\n xlim=[-0.05, 1.05],\n ylim=[-0.05, 1.05],\n xlabel=\"False Positive Rate\",\n ylabel=\"True Positive Rate\",\n title=f\"Mean ROC curve with variability\\n(Positive label '{target_names[1]}')\",\n)\nax.axis(\"square\")\nax.legend(loc=\"lower right\")\nplt.show()"
 ]
 }
],
