
Commit c2a69cc

Pushing the docs to dev/ for branch: main, commit 8fcf4ce60386dc6c466eee9344276482d7703c2d
1 parent bf3f4c7 commit c2a69cc

1,217 files changed (+4,539 −4,773 lines)


Diff for: dev/_downloads/4825fc8223d1af0f3b61080c3dea3a62/plot_faces_decomposition.py

+1 −1

@@ -75,7 +75,7 @@ def plot_gallery(title, images, n_col=n_col, n_row=n_row, cmap=plt.cm.gray):
     ),
     (
         "Non-negative components - NMF",
-        decomposition.NMF(n_components=n_components, init="nndsvda", tol=5e-3),
+        decomposition.NMF(n_components=n_components, tol=5e-3),
         False,
     ),
     (
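Dropping init="nndsvda" here is most likely redundant rather than a behavior change: since scikit-learn 1.1, NMF's default init=None resolves to "nndsvda" whenever n_components <= min(n_samples, n_features), which holds in this example (6 components, 4096 pixel features). Below is a minimal sketch, not part of this commit, that checks the two spellings produce the same factorization; the data shape and random_state are illustrative assumptions.

import numpy as np
from sklearn import decomposition

rng = np.random.RandomState(0)
X = np.abs(rng.randn(100, 64))  # NMF requires non-negative input

# Explicit init versus the post-1.1 default (init=None -> "nndsvda" here).
explicit = decomposition.NMF(n_components=6, init="nndsvda", tol=5e-3, random_state=0)
implicit = decomposition.NMF(n_components=6, tol=5e-3, random_state=0)

W_explicit = explicit.fit_transform(X)
W_implicit = implicit.fit_transform(X)

# With identical initialization and the deterministic "cd" solver, the
# learned factors should coincide.
print(np.allclose(W_explicit, W_implicit))  # expected: True on scikit-learn >= 1.1

On older releases, where init=None fell back to "nndsvd", the two calls would start from different initializations, so this edit tracks the newer default rather than altering what the example computes.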

Diff for: dev/_downloads/fcae36814d8e700024ca855a1eb87ca9/plot_faces_decomposition.ipynb

+1 −1

The notebook stores the whole example script as a single JSON source string; the change is the same one-line edit as in the .py file above. Only the changed fragment of the string is shown here (elisions marked with ...):

@@ -26,7 +26,7 @@
    },
    "outputs": [],
    "source": [
-    "# Authors: Vlad Niculae, Alexandre Gramfort\n# License: BSD 3 clause\n ... decomposition.NMF(n_components=n_components, init=\"nndsvda\", tol=5e-3),\n ... plt.show()"
+    "# Authors: Vlad Niculae, Alexandre Gramfort\n# License: BSD 3 clause\n ... decomposition.NMF(n_components=n_components, tol=5e-3),\n ... plt.show()"
     ]
    }
   ],

Diff for: dev/_downloads/scikit-learn-docs.zip

12.1 KB
Binary file not shown.

Diff for: dev/_images/sphx_glr_plot_anomaly_comparison_001.png

269 Bytes

Diff for: dev/_images/sphx_glr_plot_calibration_curve_002.png

11 Bytes

Diff for: dev/_images/sphx_glr_plot_cluster_comparison_001.png

-1.12 KB

Diff for: dev/_images/sphx_glr_plot_coin_segmentation_001.png

101 Bytes

Diff for: dev/_images/sphx_glr_plot_coin_segmentation_002.png

-23 Bytes

Diff for: dev/_images/sphx_glr_plot_compare_methods_001.png

190 Bytes

Diff for: dev/_images/sphx_glr_plot_compare_methods_thumb.png

-33 Bytes

Diff for: dev/_images/sphx_glr_plot_digits_pipe_001.png

-269 Bytes

Diff for: dev/_images/sphx_glr_plot_digits_pipe_thumb.png

20 Bytes

Diff for: dev/_images/sphx_glr_plot_image_denoising_001.png

137 Bytes

Diff for: dev/_images/sphx_glr_plot_image_denoising_003.png

181 Bytes

Diff for: dev/_images/sphx_glr_plot_image_denoising_004.png

-13 Bytes

Diff for: dev/_images/sphx_glr_plot_image_denoising_005.png

-64 Bytes

Diff for: dev/_images/sphx_glr_plot_image_denoising_006.png

-102 Bytes

Diff for: dev/_images/sphx_glr_plot_image_denoising_thumb.png

74 Bytes

Diff for: dev/_images/sphx_glr_plot_learning_curve_001.png

3.78 KB

Diff for: dev/_images/sphx_glr_plot_learning_curve_thumb.png

663 Bytes

Diff for: dev/_images/sphx_glr_plot_linkage_comparison_001.png

-922 Bytes

Diff for: dev/_images/sphx_glr_plot_lle_digits_002.png

420 Bytes

Diff for: dev/_images/sphx_glr_plot_manifold_sphere_001.png

434 Bytes

Diff for: dev/_images/sphx_glr_plot_manifold_sphere_thumb.png

-24 Bytes

Diff for: dev/_images/sphx_glr_plot_prediction_latency_001.png

-804 Bytes

Diff for: dev/_images/sphx_glr_plot_prediction_latency_002.png

478 Bytes

Diff for: dev/_images/sphx_glr_plot_prediction_latency_003.png

-606 Bytes

Diff for: dev/_images/sphx_glr_plot_prediction_latency_004.png

-63 Bytes

Diff for: dev/_images/sphx_glr_plot_sgd_early_stopping_001.png

-263 Bytes

Diff for: dev/_images/sphx_glr_plot_stack_predictors_001.png

-202 Bytes

Diff for: dev/_images/sphx_glr_plot_stack_predictors_thumb.png

-12 Bytes

Diff for: dev/_images/sphx_glr_plot_theilsen_001.png

22 Bytes

Diff for: dev/_images/sphx_glr_plot_theilsen_002.png

-23 Bytes

Diff for: dev/_images/sphx_glr_plot_theilsen_thumb.png

4 Bytes

Diff for: dev/_sources/auto_examples/applications/plot_cyclical_feature_engineering.rst.txt

+1 −1

Diff for: dev/_sources/auto_examples/applications/plot_digits_denoising.rst.txt

+1 −1

Diff for: dev/_sources/auto_examples/applications/plot_face_recognition.rst.txt

+5 −5

Diff for: dev/_sources/auto_examples/applications/plot_model_complexity_influence.rst.txt

+15 −15
