
Linear Regression & SVM

The document contains code snippets demonstrating different machine learning techniques: linear regression, SVM, logistic regression, decision trees, k-NN, random forests, bagging, boosting (AdaBoost), k-means clustering, PCA, and a convolutional neural network. Each supervised snippet imports its libraries, loads or generates sample data, trains a model on a training split, and evaluates it on a held-out test split.


LinearRegression

# Importing necessary libraries
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt

# Generating some sample data
np.random.seed(0)
X = 2 * np.random.rand(100, 1)  # Independent variable
y = 4 + 3 * X + np.random.randn(100, 1)  # Dependent variable

# Splitting the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Training the linear regression model
model = LinearRegression()
model.fit(X_train, y_train)

# Printing the coefficients (slope and intercept)
print("Slope (Coefficient):", model.coef_[0][0])
print("Intercept:", model.intercept_[0])

# Making predictions on the testing set
y_pred = model.predict(X_test)

# Plotting the data and the regression line
plt.scatter(X_test, y_test, color='black')
plt.plot(X_test, y_pred, color='blue', linewidth=3)
plt.xlabel('X')
plt.ylabel('y')
plt.title('Linear Regression')
plt.show()
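
The snippet above reports only the fitted coefficients. A minimal sketch of also scoring the fit on the held-out split, using scikit-learn's mean_squared_error and r2_score (this evaluation step is an addition, not part of the original listing):

# Sketch: scoring the fitted regression on the test split.
# Assumes model, X_test, and y_test from the snippet above are still in scope.
from sklearn.metrics import mean_squared_error, r2_score

y_pred = model.predict(X_test)
print("Mean Squared Error:", mean_squared_error(y_test, y_pred))
print("R^2 Score:", r2_score(y_test, y_pred))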
svm

# Importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, confusion_matrix

# Load the iris dataset
iris = datasets.load_iris()
X = iris.data[:, :2]  # We only take the first two features for visualization purposes
y = iris.target

# Splitting the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Training the SVM model
model = SVC(kernel='linear', C=1)  # Using a linear kernel for simplicity
model.fit(X_train, y_train)

# Making predictions on the testing set
y_pred = model.predict(X_test)

# Calculating accuracy
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy:", accuracy)

# Creating a confusion matrix
conf_matrix = confusion_matrix(y_test, y_pred)
print("Confusion Matrix:")
print(conf_matrix)

# Plotting decision boundary
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1), np.arange(y_min, y_max, 0.1))
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, alpha=0.4)
plt.scatter(X[:, 0], X[:, 1], c=y, marker='o', edgecolors='k')
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.title('SVM Decision Boundary')
plt.show()
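
The snippet fixes kernel='linear'. As a hedged one-line variation (not in the original listing), the same pipeline works with the RBF kernel, scikit-learn's default, which often handles non-linearly separable data better:

# Sketch: the same SVM pipeline with an RBF kernel instead of a linear one.
# Assumes X_train, X_test, y_train, y_test from the snippet above are in scope.
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score

rbf_model = SVC(kernel='rbf', C=1, gamma='scale')  # gamma='scale' is the sklearn default
rbf_model.fit(X_train, y_train)
print("RBF accuracy:", accuracy_score(y_test, rbf_model.predict(X_test)))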
LogisticRegression

# Importing necessary libraries
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix
import matplotlib.pyplot as plt

# Generating some sample data
np.random.seed(0)
X = 2 * np.random.rand(100, 1)  # Independent variable
y = ((4 + 3 * X + np.random.randn(100, 1)) > 6).ravel()  # Binary target; ravel() gives the 1-D labels sklearn expects

# Splitting the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Training the logistic regression model
model = LogisticRegression()
model.fit(X_train, y_train)

# Making predictions on the testing set
y_pred = model.predict(X_test)

# Calculating accuracy
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy:", accuracy)

# Creating a confusion matrix
conf_matrix = confusion_matrix(y_test, y_pred)
print("Confusion Matrix:")
print(conf_matrix)

# Plotting actual vs. predicted labels
plt.scatter(X_test, y_test, color='black')
plt.scatter(X_test, y_pred, color='blue', marker='x')
plt.xlabel('X')
plt.ylabel('y')
plt.title('Logistic Regression')
plt.show()

decision tree

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier, plot_tree
import matplotlib.pyplot as plt

# Load the iris dataset
iris = load_iris()
X = iris.data
y = iris.target

# Split the dataset into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Initialize the Decision Tree classifier
clf = DecisionTreeClassifier()

# Train the classifier on the training data
clf.fit(X_train, y_train)

# Visualize the decision tree
plt.figure(figsize=(12, 8))
plot_tree(clf, feature_names=iris.feature_names, class_names=iris.target_names, filled=True)
plt.show()
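
The tree is visualized but never scored. A minimal sketch of evaluating it on the held-out split, mirroring the metrics the other sections use (an addition to the original listing):

# Sketch: scoring the fitted tree on the test split.
# Assumes clf, X_test, and y_test from the snippet above are in scope.
from sklearn.metrics import accuracy_score, confusion_matrix

y_pred = clf.predict(X_test)
print("Accuracy:", accuracy_score(y_test, y_pred))
print("Confusion Matrix:")
print(confusion_matrix(y_test, y_pred))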
knn

# Importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score, confusion_matrix

# Load the iris dataset
iris = datasets.load_iris()
X = iris.data[:, :2]  # We only take the first two features for visualization purposes
y = iris.target

# Splitting the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Training the k-NN model
k = 5  # Number of neighbors
model = KNeighborsClassifier(n_neighbors=k)
model.fit(X_train, y_train)

# Making predictions on the testing set
y_pred = model.predict(X_test)

# Calculating accuracy
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy:", accuracy)

# Creating a confusion matrix
conf_matrix = confusion_matrix(y_test, y_pred)
print("Confusion Matrix:")
print(conf_matrix)

# Plotting decision boundary
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1), np.arange(y_min, y_max, 0.1))
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, alpha=0.4)
plt.scatter(X[:, 0], X[:, 1], c=y, marker='o', edgecolors='k')
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.title('k-NN Decision Boundary')
plt.show()

random forest

# Importing necessary libraries
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, confusion_matrix

# Generating some sample data
X, y = make_classification(n_samples=1000, n_features=20, n_classes=2, random_state=42)

# Splitting the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Training the Random Forest model
model = RandomForestClassifier(n_estimators=100, random_state=42)
model.fit(X_train, y_train)

# Making predictions on the testing set
y_pred = model.predict(X_test)

# Calculating accuracy
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy:", accuracy)

# Creating a confusion matrix
conf_matrix = confusion_matrix(y_test, y_pred)
print("Confusion Matrix:")
print(conf_matrix)
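
A fitted random forest also exposes per-feature importance scores through its feature_importances_ attribute. A sketch of inspecting them for the model above (not part of the original listing):

# Sketch: inspecting feature importances of the fitted forest.
# Assumes model from the random forest snippet above is in scope.
import numpy as np

importances = model.feature_importances_
top = np.argsort(importances)[::-1][:5]  # indices of the five most important features
for i in top:
    print(f"Feature {i}: importance {importances[i]:.3f}")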
bagging

# Importing necessary libraries
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score, confusion_matrix

# Generating some sample data
X, y = make_classification(n_samples=1000, n_features=20, n_classes=2, random_state=42)

# Splitting the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Training the Bagging Classifier with Decision Trees as base estimators
base_estimator = DecisionTreeClassifier(max_depth=3)
model = BaggingClassifier(estimator=base_estimator, n_estimators=10,
                          random_state=42)  # keyword was base_estimator before scikit-learn 1.2
model.fit(X_train, y_train)

# Making predictions on the testing set
y_pred = model.predict(X_test)

# Calculating accuracy
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy:", accuracy)

# Creating a confusion matrix
conf_matrix = confusion_matrix(y_test, y_pred)
print("Confusion Matrix:")
print(conf_matrix)

k-means

# Importing necessary libraries
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans

# Generating some sample data
X, _ = make_blobs(n_samples=300, centers=4, cluster_std=0.60, random_state=0)

# Visualizing the sample data
plt.scatter(X[:, 0], X[:, 1], s=50)
plt.xlabel('Feature 1')
plt.ylabel('Feature 2')
plt.title('Sample Data')
plt.show()

# Applying k-means clustering
k = 4  # Number of clusters
model = KMeans(n_clusters=k, n_init=10)  # n_init made explicit; its default changed across sklearn versions
model.fit(X)

# Getting cluster centers and labels
centers = model.cluster_centers_
labels = model.labels_

# Visualizing the clusters
plt.scatter(X[:, 0], X[:, 1], c=labels, s=50, cmap='viridis')
plt.scatter(centers[:, 0], centers[:, 1], c='red', s=200, alpha=0.75)
plt.xlabel('Feature 1')
plt.ylabel('Feature 2')
plt.title('K-means Clustering')
plt.show()
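
The snippet hard-codes k = 4, which happens to match the number of generated blobs. When k is unknown, a common heuristic is the elbow method: plot the inertia (within-cluster sum of squared distances) over a range of k and look for the bend. A sketch, not part of the original listing:

# Sketch: elbow method for choosing k.
# Assumes X from the k-means snippet above is in scope.
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans

ks = range(1, 10)
inertias = [KMeans(n_clusters=k, n_init=10).fit(X).inertia_ for k in ks]
plt.plot(ks, inertias, marker='o')
plt.xlabel('Number of clusters k')
plt.ylabel('Inertia')
plt.title('Elbow Method')
plt.show()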
boosting

# Importing necessary libraries
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score, confusion_matrix

# Generating some sample data
X, y = make_classification(n_samples=1000, n_features=20, n_classes=2, random_state=42)

# Splitting the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Training the AdaBoost Classifier with Decision Trees as base estimator
base_estimator = DecisionTreeClassifier(max_depth=1)  # Weak learner (a decision stump)
model = AdaBoostClassifier(estimator=base_estimator, n_estimators=50,
                           random_state=42)  # keyword was base_estimator before scikit-learn 1.2
model.fit(X_train, y_train)

# Making predictions on the testing set
y_pred = model.predict(X_test)

# Calculating accuracy
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy:", accuracy)

# Creating a confusion matrix
conf_matrix = confusion_matrix(y_test, y_pred)
print("Confusion Matrix:")
print(conf_matrix)

pca

# Importing necessary libraries
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA

# Load the iris dataset
iris = load_iris()
X = iris.data
y = iris.target

# Applying PCA
pca = PCA(n_components=2)  # Reduce to 2 dimensions
X_pca = pca.fit_transform(X)

# Visualizing the transformed data
plt.figure(figsize=(8, 6))
plt.scatter(X_pca[:, 0], X_pca[:, 1], c=y, cmap='viridis')
plt.xlabel('Principal Component 1')
plt.ylabel('Principal Component 2')
plt.title('PCA')
plt.colorbar(label='Target Class')
plt.show()
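
A fitted PCA reports how much variance each component captures via explained_variance_ratio_, a quick check on whether two components are enough here. A minimal sketch (an addition to the original listing):

# Sketch: variance retained by the two components.
# Assumes pca from the PCA snippet above is in scope.
print("Explained variance ratio:", pca.explained_variance_ratio_)
print("Total variance retained:", pca.explained_variance_ratio_.sum())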

cnn

import tensorflow as tf
from tensorflow.keras import datasets, layers, models

# Load and preprocess the CIFAR-10 dataset
(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()
train_images, test_images = train_images / 255.0, test_images / 255.0

# Define the CNN model
model = models.Sequential([
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.Flatten(),
    layers.Dense(64, activation='relu'),
    layers.Dense(10)  # Raw logits for the 10 CIFAR-10 classes
])

# Compile the model
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])

# Train the model
history = model.fit(train_images, train_labels, epochs=10,
                    validation_data=(test_images, test_labels))

# Evaluate the model
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print(f"Test accuracy: {test_acc}")
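
model.fit returns a History object whose history dict records per-epoch metrics. A sketch of plotting the training and validation accuracy curves from it (not part of the original listing):

# Sketch: plotting the learning curves recorded during training.
# Assumes history from the CNN snippet above is in scope.
import matplotlib.pyplot as plt

plt.plot(history.history['accuracy'], label='train accuracy')
plt.plot(history.history['val_accuracy'], label='validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()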
