ML Assignment
ASSIGNMENT
Description:
PYTHON CODE:
import numpy as np
import pandas as pd

import tensorflow as tf
from sklearn.model_selection import train_test_split, KFold
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import (
    Input, Dense, Conv1D, Flatten, LSTM, Dropout, Concatenate, Reshape
)
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
# Load dataset and drop rows with missing values.
data = pd.read_excel('CLEANED_DATASET.xlsx')
data = data.dropna()

# Features are all columns except the last; the last column is the target.
X = data.iloc[:, :-1].values
y = data.iloc[:, -1].values

# BUG FIX: `scaler` was used before being created — instantiate it first.
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# Reshape the *scaled* features (was reshaping raw X, which is inconsistent
# with the scaled data used everywhere else) for the CNN and RNN branches:
# (samples, features) -> (samples, features, 1).
X_cnn = X_scaled.reshape(X_scaled.shape[0], X_scaled.shape[1], 1)
X_rnn = X_scaled.reshape(X_scaled.shape[0], X_scaled.shape[1], 1)

X_train, X_test, y_train, y_test = train_test_split(
    X_scaled, y, test_size=0.2, random_state=42
)

# Hyperparameter tuning (placeholders for tuning later if needed)
input_shape = X_cnn.shape[1:]
# ---- Multi-branch regression model: CNN + LSTM + MLP over the same features ----

# CNN branch: 1-D convolutions over the feature axis, then flatten.
cnn_input = Input(shape=input_shape)
cnn = Conv1D(64, kernel_size=3, activation='relu')(cnn_input)
cnn = Conv1D(32, kernel_size=3, activation='relu')(cnn)
cnn = Flatten()(cnn)

# RNN branch: stacked LSTMs; the first returns sequences so the second can
# consume them, the second returns the final hidden state.
rnn_input = Input(shape=input_shape)
rnn = LSTM(64, return_sequences=True)(rnn_input)
rnn = LSTM(32)(rnn)

# MLP branch: dense layers over the flat (unreshaped) feature vector.
mlp_input = Input(shape=(X.shape[1],))
mlp = Dense(64, activation='relu')(mlp_input)
mlp = Dense(32, activation='relu')(mlp)

# Merge the three branches and regress a single value.
combined = Concatenate()([cnn, rnn, mlp])
combined = Dense(64, activation='relu')(combined)
combined = Dropout(0.3)(combined)
combined = Dense(32, activation='relu')(combined)
output = Dense(1)(combined)  # linear output: regression target

# FIX: the compile statement was broken across two lines by the extraction;
# rejoined into one valid call.
model = Model(inputs=[cnn_input, rnn_input, mlp_input], outputs=output)
model.compile(optimizer=Adam(learning_rate=0.001), loss='mse', metrics=['mae'])
# K-fold cross-validation.
# BUG FIX: the original created `kfold` but never used it, and appended a
# `history` variable that was never produced (model.fit was never called).
# Train the model on each fold and collect the fit histories.
kfold = KFold(n_splits=5, shuffle=True, random_state=42)
histories = []
early_stop = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)
for train_idx, val_idx in kfold.split(X_scaled):
    # Each branch receives the same rows, shaped for its own input layer.
    history = model.fit(
        [X_cnn[train_idx], X_rnn[train_idx], X_scaled[train_idx]],
        y[train_idx],
        validation_data=(
            [X_cnn[val_idx], X_rnn[val_idx], X_scaled[val_idx]],
            y[val_idx],
        ),
        epochs=50,
        batch_size=32,
        callbacks=[early_stop],
        verbose=0,
    )
    histories.append(history)

# Evaluate model on the full dataset.
# FIX: feed the scaled features to the MLP branch (was raw X, inconsistent
# with the scaled data the model trains on).
final_loss, final_mae = model.evaluate([X_cnn, X_rnn, X_scaled], y)
print(f"Final Loss: {final_loss}, Final MAE: {final_mae}")
import tensorflow.keras.backend as K


# Custom "accuracy" for regression: a prediction counts as correct when its
# absolute error is within `threshold`. (The original was garbled by the
# extraction: comment and statements were split across lines and the body
# indentation was lost.)
def regression_accuracy(y_true, y_pred, threshold=20):
    """Return the fraction of predictions with absolute error <= threshold.

    Keras-metric compatible: takes tensors, returns a scalar tensor in [0, 1].
    `threshold` is generalized to a defaulted parameter (was hard-coded 20).
    """
    error = K.abs(y_true - y_pred)
    # True where the prediction is "close enough".
    correct_predictions = K.less_equal(error, threshold)
    accuracy = K.mean(K.cast(correct_predictions, K.floatx()))
    return accuracy
# Evaluate model with the custom accuracy.
# BUG FIX: the original unpacked three values from evaluate(), but the model
# was compiled with only loss + mae. Re-compile so the custom metric is
# tracked, then evaluate (compiling resets the optimizer state but keeps the
# trained weights).
model.compile(
    optimizer=Adam(learning_rate=0.001),
    loss='mse',
    metrics=['mae', regression_accuracy],
)
final_loss, final_mae, final_accuracy = model.evaluate([X_cnn, X_rnn, X_scaled], y)
print(
    f"Final Loss: {final_loss}, Final MAE: {final_mae}, "
    f"Final Accuracy: {final_accuracy * 100:.2f}%"
)
print("Reg.No : 22BCE7452")
OUTPUT:
MODEL 2:
Using CNN-LSTM-SVM (Convolutional Neural Network + Long Short-Term Memory + Support Vector Machine)
Description:
This model integrates CNNs, Long Short-Term Memory (LSTM) networks, and
Support Vector Machines (SVMs). CNNs extract spatial or hierarchical features,
while LSTMs handle sequential dependencies with better memory retention than
standard RNNs. The final feature set is fed into an SVM for classification,
leveraging its robustness with high-dimensional and non-linear data. This
architecture is effective for tasks like gesture recognition, sentiment analysis,
and spatiotemporal activity detection.
We used the cleaned dataset after removing irrelevant data.
PYTHON CODE:
# ---- Model 2: CNN-LSTM feature extractor + SVR head ----
# FIX: the keras.layers import was split across two lines by the extraction
# (a syntax error as rendered); rejoined and grouped by origin.
import numpy as np
import pandas as pd

import tensorflow as tf
import tensorflow.keras.backend as K
from sklearn.metrics import mean_absolute_error, make_scorer
from sklearn.model_selection import train_test_split, KFold, GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR
from tensorflow.keras.layers import Conv1D, MaxPooling1D, Flatten, LSTM, Dense, Dropout
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
# Model evaluation
# NOTE(review): `best_svm_model`, `test_features`, and `y_test` are defined in
# Model-2 training code that is not shown in this chunk — presumably the
# GridSearchCV best SVR estimator and the CNN-LSTM-extracted test features;
# confirm against that code before relying on this.
y_pred = best_svm_model.predict(test_features)
final_mae = mean_absolute_error(y_test, y_pred)
# Reuses the Keras regression_accuracy metric on the SVR predictions by
# wrapping the numpy arrays as float32 tensors.
final_accuracy = regression_accuracy(tf.constant(y_test, dtype=tf.float32),
tf.constant(y_pred, dtype=tf.float32))
OUTPUT: