\n\n# JupyterLite warning\n\nRunning the scikit-learn examples in JupyterLite is experimental, and you may encounter unexpected behavior.\n\nThe main difference is that imports take much longer than usual; for example, the first `import sklearn` can take roughly 10-20 seconds.\n\nIf you notice problems, feel free to open an [issue](https://github.com/scikit-learn/scikit-learn/issues/new/choose) about it.\n
"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# JupyterLite-specific code\nimport matplotlib\nimport pandas"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"\n# Normal, Ledoit-Wolf and OAS Linear Discriminant Analysis for classification\n\nThis example illustrates how the Ledoit-Wolf and Oracle Shrinkage\nApproximating (OAS) estimators of covariance can improve classification.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"import matplotlib.pyplot as plt\nimport numpy as np\n\nfrom sklearn.covariance import OAS\nfrom sklearn.datasets import make_blobs\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n\nn_train = 20 # samples for training\nn_test = 200 # samples for testing\nn_averages = 50 # how often to repeat classification\nn_features_max = 75 # maximum number of features\nstep = 4 # step size for the calculation\n\n\ndef generate_data(n_samples, n_features):\n \"\"\"Generate random blob-ish data with noisy features.\n\n This returns an array of input data with shape `(n_samples, n_features)`\n and an array of `n_samples` target labels.\n\n Only one feature contains discriminative information, the other features\n contain only noise.\n \"\"\"\n X, y = make_blobs(n_samples=n_samples, n_features=1, centers=[[-2], [2]])\n\n # add non-discriminative features\n if n_features > 1:\n X = np.hstack([X, np.random.randn(n_samples, n_features - 1)])\n return X, y\n\n\nacc_clf1, acc_clf2, acc_clf3 = [], [], []\nn_features_range = range(1, n_features_max + 1, step)\nfor n_features in n_features_range:\n score_clf1, score_clf2, score_clf3 = 0, 0, 0\n for _ in range(n_averages):\n X, y = generate_data(n_train, n_features)\n\n clf1 = LinearDiscriminantAnalysis(solver=\"lsqr\", shrinkage=None).fit(X, y)\n clf2 = LinearDiscriminantAnalysis(solver=\"lsqr\", shrinkage=\"auto\").fit(X, y)\n oa = OAS(store_precision=False, assume_centered=False)\n clf3 = LinearDiscriminantAnalysis(solver=\"lsqr\", covariance_estimator=oa).fit(\n X, y\n )\n\n X, y = generate_data(n_test, n_features)\n score_clf1 += clf1.score(X, y)\n score_clf2 += clf2.score(X, y)\n score_clf3 += clf3.score(X, y)\n\n acc_clf1.append(score_clf1 / n_averages)\n acc_clf2.append(score_clf2 / n_averages)\n acc_clf3.append(score_clf3 / n_averages)\n\nfeatures_samples_ratio = np.array(n_features_range) / n_train\n\nplt.plot(\n features_samples_ratio,\n acc_clf1,\n linewidth=2,\n label=\"LDA\",\n color=\"gold\",\n linestyle=\"solid\",\n)\nplt.plot(\n features_samples_ratio,\n acc_clf2,\n linewidth=2,\n label=\"LDA with Ledoit Wolf\",\n color=\"navy\",\n linestyle=\"dashed\",\n)\nplt.plot(\n features_samples_ratio,\n acc_clf3,\n linewidth=2,\n label=\"LDA with OAS\",\n color=\"red\",\n linestyle=\"dotted\",\n)\n\nplt.xlabel(\"n_features / n_samples\")\nplt.ylabel(\"Classification accuracy\")\n\nplt.legend(loc=\"lower left\")\nplt.ylim((0.65, 1.0))\nplt.suptitle(\n \"LDA (Linear Discriminant Analysis) vs. \"\n + \"\\n\"\n + \"LDA with Ledoit Wolf vs. \"\n + \"\\n\"\n + \"LDA with OAS (1 discriminative feature)\"\n)\nplt.show()"
]
}
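,
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Both shrinkage approaches estimate a shrinkage intensity from the data. As a minimal illustrative check (not part of the original example, and reusing `generate_data`, `n_train` and `n_features_max` from the cell above), the cell below fits scikit-learn's `LedoitWolf` and `OAS` estimators on a single draw of training data and prints the intensity each one selects."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Illustrative sketch: compare the shrinkage intensities that the\n# Ledoit-Wolf and OAS estimators select on one draw of the training data.\n# `generate_data`, `n_train` and `n_features_max` come from the cell above.\nfrom sklearn.covariance import OAS, LedoitWolf\n\nX, _ = generate_data(n_train, n_features_max)\nlw = LedoitWolf().fit(X)\noa = OAS().fit(X)\nprint(f\"Ledoit-Wolf shrinkage: {lw.shrinkage_:.3f}\")\nprint(f\"OAS shrinkage: {oa.shrinkage_:.3f}\")"
]
}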
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.18"
}
},
"nbformat": 4,
"nbformat_minor": 0
}