
APPENDIX C: SAMPLE CODES FOR ANN

import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf
import math
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error, r2_score
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.callbacks import EarlyStopping

data = pd.read_csv("G:\\My Drive\\xyz\\mechine learing of marshal test\\theisis work\\for midterm defence\\Training Data\\AV.csv")
data.head()

# Extract features (X) and targets (y)
X = data.iloc[:, :-6].values            # all columns except the last six (the target columns)
y_stability = data["Stability"].values  # Marshall stability target
y_flow = data["FlowValue"].values       # flow value target
y_airVoids = data["AV"].values          # air voids target
y_VFA = data["VFA"].values              # VFA target
y_VMA = data["VMA"].values              # VMA target
y_BSG = data["BSG"].values              # bulk specific gravity target

# Split the dataset into training and testing sets
(X_train, X_test,
 y_stability_train, y_stability_test,
 y_FV_train, y_FV_test,
 y_AV_train, y_AV_test,
 y_VFA_train, y_VFA_test,
 y_VMA_train, y_VMA_test,
 y_BSG_train, y_BSG_test) = train_test_split(
    X, y_stability, y_flow, y_airVoids, y_VFA, y_VMA, y_BSG,
    train_size=0.80, random_state=42)

# X_train, X_val, y_stability_train, y_val = train_test_split(X_train1, y_stability_train1, test_size=0.20, random_state=42)

print(len(y_stability))
# print(len(y_val))
print(len(y_stability_test))
print(len(y_stability_train))

# Standardize the features (scaling); the scaler is fitted on the training set only
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
# X_val = scaler.transform(X_val)
X_test = scaler.transform(X_test)
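# Optional sketch (assumption, not required by the code above): the fitted scaler could be
# persisted with joblib so that the saved .h5 models can later be applied to new data with
# exactly the same feature scaling. The file name below is illustrative only.
# import joblib
# joblib.dump(scaler, "E:/ANN model/OBC model/AV_feature_scaler.joblib")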

Nol = 6     # number of hidden layers added by the inner loop below
l = 115     # lower bound of the neuron-count (NoN) search range
u = 116     # upper bound (exclusive) of the neuron-count search range
m = 7       # not used in this excerpt

from matplotlib.colors import Normalize

# Lists that accumulate the metrics of every trained network
output_values_loss_stability_train, output_values_mae_stability_train = [], []
output_values_r2_stability_train, output_values_r_stability_train = [], []
output_values_rmse_stability_train, output_values_rse_stability_train = [], []
output_values_loss_stability, output_values_mae_stability = [], []
output_values_r2_stability, output_values_r_stability = [], []
output_values_rmse_stability, output_values_rse_stability = [], []
output_values_loss_stability_train1 = []


for NoN in range(l, u):

    # Initialize the model
    model_stability = Sequential()
    early_stopping_epoch = []
    print(f'number of hidden layers is {Nol}, and number of neurons is: {NoN}')

    # First hidden layer, connected to the input features
    model_stability.add(Dense(units=21, activation='relu', input_dim=X_train.shape[1]))
    # model_stability.add(Dense(units=20, activation='relu', input_dim=X_train.shape[1]))  # sigmoid, relu, tanh
    # model_stability.add(Dropout(0.5))

    # Add Nol hidden layers with NoN neurons each to model_stability
    j = NoN
    for i in range(1, Nol + 1):
        model_stability.add(Dense(units=j, activation='relu'))
        # j = max(int(j * 0.9), 2)
        # model_stability.add(Dense(units=NoN, activation='relu'))
        # model_stability.add(Dropout(0.5))
        '''if j == 1:
            # Use a linear activation function when j becomes 1
            model_stability.add(Dense(units=j, activation='linear'))
        else:
            model_stability.add(Dense(units=j, activation='relu'))
        j = max(int(j * 0.8), 1)'''

    # Add the output layer (assuming Stability, flow, AV, VFA, VMA or BSG is a single output)
    model_stability.add(Dense(units=1, activation='linear'))

    # Compile the model
    # Note: sigmoid and tanh activations are sometimes avoided because of the vanishing gradient problem
    # from radam import RAdamOptimizer
    from tensorflow.keras.optimizers import Adam
    # from tensorflow_addons.optimizers import RectifiedAdam

    # Create a custom optimizer with a specific learning rate (e.g., 0.001)
    # custom_optimizer1 = RectifiedAdam(learning_rate=0.001)
    custom_optimizer1 = Adam(learning_rate=0.001)

    # Compile the model with the custom optimizer
    model_stability.compile(optimizer=custom_optimizer1,
                            loss='mean_squared_error', metrics=['mae'])  # mean absolute error (MAE)

    # Number of training epochs
    a = 250

    # Define the EarlyStopping callback with patience set to 5/10/15
    # early_stopping = EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)
    # Replace 'model_stability' with your actual model variable
    # history_S = model_stability.fit(X_train, y_stability_train, epochs=a, batch_size=64,
    #                                 validation_split=0.2, verbose=1, callbacks=[early_stopping])
    # early_stopping_epoch.append(early_stopping.stopped_epoch)

    # Train the model (early stopping disabled)
    history_S = model_stability.fit(X_train, y_stability_train, epochs=a,
                                    batch_size=64, validation_split=0.2, verbose=1)
    # early_stopping_epoch.append(early_stopping.stopped_epoch)

    # Print the model summary
    model_stability.summary()

    model_directory = "E:/ANN model/OBC model/"  # edit the folder name as needed

    # After training, save the model to a file
    model_filename = f"{model_directory}model_Airvoids_ReLU_Nol_{Nol}_NoN_{NoN}.h5"
    model_stability.save(model_filename)

    # Training history (per-epoch training and validation loss)
    train_loss_S = history_S.history['loss']
    val_loss_S = history_S.history['val_loss']

    # Evaluate on the training set and store the loss and MAE
    loss_s_train, mae_s_train = model_stability.evaluate(X_train, y_stability_train)
    output_values_loss_stability_train.append(float(f"{loss_s_train:.4f}"))
    output_values_mae_stability_train.append(float(f"{mae_s_train:.4f}"))

    # Predictions on the training set
    predictions_stability_train = model_stability.predict(X_train)

    # R-squared (R^2) calculation for the training set
    r2_stability_train = r2_score(y_stability_train, predictions_stability_train)
    '''print(f"R-squared (R^2) score for stability: {r2_stability_train:.4f}")'''
    output_values_r2_stability_train.append(float(f"{r2_stability_train:.4f}"))

    # Correlation coefficient (R) calculation for the training set
    stability_corr_train = np.corrcoef(y_stability_train,
                                       predictions_stability_train.flatten())[0, 1]
    print(f'R for Stability: {stability_corr_train:.4f}')
    output_values_r_stability_train.append(float(f"{stability_corr_train:.4f}"))

    # Calculate RMSE for Stability on the training set
    stability_rmse_train = math.sqrt(mean_squared_error(y_stability_train,
                                                        predictions_stability_train))
    output_values_rmse_stability_train.append(float(f"{stability_rmse_train:.4f}"))

    # Calculate residuals and the residual standard error (RSE) for Stability on the training set
    stability_residuals_train = y_stability_train - predictions_stability_train.flatten()
    stability_rse_train = np.sqrt(np.sum(stability_residuals_train ** 2) /
                                  (len(y_stability_train) - 1))
    # print(f'RSE for Stability: {stability_rse_train:.4f}')
    output_values_rse_stability_train.append(float(f"{stability_rse_train:.4f}"))

    # Validation of the model
    # history_S = model_stability.fit(X_train, y_stability_train, epochs=a,
    #                                 batch_size=32, validation_data=(X_test, y_stability_test))

    # Evaluate the model on the test data: the network predicts y from X_test, the predictions
    # are compared with the stability targets of the test set, and the loss and MAE are returned
    loss_s, mae_s = model_stability.evaluate(X_test, y_stability_test)

    # Store the test (validation) loss and mean absolute error
    output_values_loss_stability.append(float(f"{loss_s:.4f}"))
    output_values_mae_stability.append(float(f"{mae_s:.4f}"))

    # Make predictions on the test set
    predictions_stability = model_stability.predict(X_test)

    # R-squared (R^2) calculation for the test set
    r2_stability = r2_score(y_stability_test, predictions_stability)
    # formatted_r2_stability = float(f"{r2_stability:.4f}")
    '''print(f"R-squared (R^2) score for stability: {r2_stability:.4f}")'''
    output_values_r2_stability.append(float(f"{r2_stability:.4f}"))
    # output_values_r2_stability.append(float(f"{integer_part}.{decimal_part[:4]}"))

    # Correlation coefficient (R) calculation for the test set
    stability_corr = np.corrcoef(y_stability_test, predictions_stability.flatten())[0, 1]
    print(f'R for Stability: {stability_corr:.4f}')
    output_values_r_stability.append(float(f"{stability_corr:.4f}"))

    # Calculate RMSE for Stability on the test set
    stability_rmse = math.sqrt(mean_squared_error(y_stability_test, predictions_stability))
    '''print(f'RMSE for Stability: {stability_rmse:.4f}')'''
    output_values_rmse_stability.append(float(f"{stability_rmse:.4f}"))

    # Calculate residuals and the residual standard error (RSE) for Stability on the test set
    stability_residuals = y_stability_test - predictions_stability.flatten()
    stability_rse = np.sqrt(np.sum(stability_residuals ** 2) / (len(y_stability_test) - 1))
    # print(f'RSE for Stability: {stability_rse:.4f}')
    output_values_rse_stability.append(float(f"{stability_rse:.4f}"))
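    # A compact alternative (sketch; the helper name and its use are hypothetical): the four
    # goodness-of-fit measures computed above for a target could be wrapped in one function,
    # where RMSE = sqrt(mean((y - y_hat)^2)) and RSE = sqrt(sum((y - y_hat)^2) / (n - 1)):
    #
    # def regression_metrics(y_true, y_pred):
    #     residuals = y_true - y_pred.flatten()
    #     return {"R2": r2_score(y_true, y_pred),
    #             "R": np.corrcoef(y_true, y_pred.flatten())[0, 1],
    #             "RMSE": math.sqrt(mean_squared_error(y_true, y_pred)),
    #             "RSE": np.sqrt(np.sum(residuals ** 2) / (len(y_true) - 1))}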

    output_directory = "E:/ANN model/OBC model/"  # e.g. E:/ANN model/ST model; edit the folder name as needed

    # Open the file for writing and save the training-set metrics as a tab-separated table
    filename = f"{output_directory}output_values_Airvoids_ReLU_train_Nol_{Nol}_NoN_{NoN}.txt"
    with open(filename, "w") as file:
        file.write("\tLoss\tMAE\tR2\tR\tRSE\tRMSE\tepoch\n")
        for values in zip(output_values_loss_stability_train,
                          output_values_mae_stability_train,
                          output_values_r2_stability_train,
                          output_values_r_stability_train,
                          output_values_rse_stability_train,
                          output_values_rmse_stability_train):
            file.write("\t".join(map(str, values)) + "\n")

    output_directory = "E:/ANN model/OBC model/"  # e.g. E:/ANN model/ST model; edit the folder name as needed

    # Open the file for writing and save the test-set metrics as a tab-separated table
    filename = f"{output_directory}output_values_Airvoids_ReLU_Nol_{Nol}_NoN_{NoN}.txt"
    with open(filename, "w") as file:
        file.write("\tLoss\tMAE\tR2\tR\tRSE\tRMSE\tepoch\n")
        for values in zip(output_values_loss_stability,
                          output_values_mae_stability,
                          output_values_r2_stability,
                          output_values_r_stability,
                          output_values_rse_stability,
                          output_values_rmse_stability):
            file.write("\t".join(map(str, values)) + "\n")

    # Data storage
    output_values_loss_stability_train1.append(output_values_loss_stability_train)

    model_stability.summary()
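The saved networks can later be reloaded for prediction. A minimal usage sketch, assuming one of the .h5 files written above exists and that new samples are scaled with the same StandardScaler fitted on the training data (X_new below is a hypothetical array of new feature rows):

from tensorflow.keras.models import load_model

# Load one of the networks saved above (the file name follows the pattern used in the loop)
model = load_model("E:/ANN model/OBC model/model_Airvoids_ReLU_Nol_6_NoN_115.h5")

# New samples must be scaled exactly like the training data before predicting
X_new_scaled = scaler.transform(X_new)
predicted_values = model.predict(X_new_scaled)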
