
Untitled 32

The document outlines a machine learning workflow that uses a dataset to predict transistor widths (W1,2 through W7, in um) from derived specification features (Ao, GBW, PM, CMRR, PSRR) with Deep Neural Network (DNN) and Recurrent Neural Network (RNN) models. It covers data preprocessing, model training, and evaluation via match rates compared against values reported in a paper, then visualizes the relative-error comparison and prints a summary table of match rates for both models.
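
The match rate used throughout is derived from the relative prediction error. As a minimal sketch of the metric exactly as the code below computes it (per output column, averaged over the test samples):

    match_rate = 1 - np.mean(np.abs((y_true - y_pred) / y_true), axis=0)

where y_true and y_pred are the standardized test targets and the model predictions.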


import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM, Dropout, InputLayer
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2

# Load dataset
df = pd.read_excel("dataset_28112024new.xlsx")
df = df.loc[:, ~df.columns.str.contains('^Unnamed')]  # drop auto-generated index columns

# Step 1: Compute input features from dataset
df['Ao'] = df['AV']
df['GBW'] = df['GB (MHz)']
df['PM'] = 60  # assumed
df['CMRR'] = 20 * np.log10(((df['VICMAX'] + df['VICMIN']) / 2).abs() /
                           (df['VICMAX'] - df['VICMIN']).abs())
df['PSRR'] = 20 * np.log10(1 / df['PDISS (uW)'])

# Define input/output
input_cols = ['Ao', 'GBW', 'PM', 'CMRR', 'PSRR']
output_cols = ['W1,2 (um)', 'W3,4 (um)', 'W5,8 (um)', 'W6 (um)', 'W7 (um)']
X = df[input_cols]
y = df[output_cols]

# Normalize
scaler_X = StandardScaler()
scaler_y = StandardScaler()
X_scaled = scaler_X.fit_transform(X)
y_scaled = scaler_y.fit_transform(y)

# Train-test split
X_train, X_test, y_train, y_test = train_test_split(
    X_scaled, y_scaled, test_size=0.1, random_state=42)

# --- DNN Model ---
dnn_model = Sequential()
dnn_model.add(InputLayer(input_shape=(X_train.shape[1],)))
# 16 hidden layers of 1024 ReLU units, each with L2 regularization and dropout
for _ in range(16):
    dnn_model.add(Dense(1024, activation='relu',
                        kernel_regularizer=l2(0.001)))
    dnn_model.add(Dropout(0.2))
dnn_model.add(Dense(len(output_cols)))
dnn_model.compile(optimizer=Adam(), loss='mse')
dnn_model.fit(X_train, y_train, epochs=200, batch_size=32, verbose=0)
y_pred_dnn = dnn_model.predict(X_test)

# --- RNN Model ---
# Note: the five input features cannot be split into 3 timesteps,
# so each sample is treated as a single-timestep sequence here.
X_train_rnn = X_train.reshape((X_train.shape[0], 1, X_train.shape[1]))
X_test_rnn = X_test.reshape((X_test.shape[0], 1, X_test.shape[1]))
rnn_model = Sequential([
    LSTM(4096, return_sequences=True,
         input_shape=(1, X_train.shape[1])),
    Dropout(0.2),
    LSTM(4096),
    Dropout(0.2),
    Dense(len(output_cols))
])
rnn_model.compile(optimizer=Adam(), loss='mse')
rnn_model.fit(X_train_rnn, y_train, epochs=200, batch_size=3, verbose=0)
y_pred_rnn = rnn_model.predict(X_test_rnn)

# --- Calculate match rates from prediction ---
def relative_error(y_true, y_pred):
    return np.abs((y_true - y_pred) / y_true)

rel_error_dnn = relative_error(y_test, y_pred_dnn)
rel_error_rnn = relative_error(y_test, y_pred_rnn)

match_rate_dnn_pred = 1 - np.mean(rel_error_dnn, axis=0)
match_rate_rnn_pred = 1 - np.mean(rel_error_rnn, axis=0)
average_dnn_pred = np.mean(match_rate_dnn_pred)
average_rnn_pred = np.mean(match_rate_rnn_pred)
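
Because y_test and the predictions are in standardized units, target values near zero can inflate the relative error. A minimal sketch, assuming the scaler_y fitted earlier, of the same match-rate computation expressed in the original width units (um):

# Sketch (assumption): invert the standardization before measuring relative error.
y_test_orig = scaler_y.inverse_transform(y_test)
y_pred_dnn_orig = scaler_y.inverse_transform(y_pred_dnn)
y_pred_rnn_orig = scaler_y.inverse_transform(y_pred_rnn)
match_rate_dnn_orig = 1 - np.mean(relative_error(y_test_orig, y_pred_dnn_orig), axis=0)
match_rate_rnn_orig = 1 - np.mean(relative_error(y_test_orig, y_pred_rnn_orig), axis=0)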

# --- Paper Values ---
specs = ['A0', 'GBW', 'PM', 'CMRR', 'PSRR']
match_rate_rnn_paper = [0.92390706, 0.92313453, 0.93713988, 0.98193484, 0.86360555]
match_rate_dnn_paper = [0.92818363, 1.00000000, 0.94211368, 0.98424269, 0.92548079]

# --- Plot comparison ---
x = np.arange(len(specs))
width = 0.2

plt.figure(figsize=(10, 5))
plt.bar(x - 1.5*width, 1 - np.array(match_rate_dnn_paper), width,
        label='DNN (paper)', color='blue')
plt.bar(x - 0.5*width, 1 - np.array(match_rate_dnn_pred[:5]), width,
        label='DNN (dataset)', color='skyblue')
plt.bar(x + 0.5*width, 1 - np.array(match_rate_rnn_paper), width,
        label='RNN (paper)', color='green')
plt.bar(x + 1.5*width, 1 - np.array(match_rate_rnn_pred[:5]), width,
        label='RNN (dataset)', color='lightgreen')
plt.xticks(x, specs)
plt.ylabel("Relative Error")
plt.title("Relative Error Comparison: Paper vs Dataset Prediction")
plt.legend()
plt.grid(True, axis='y', linestyle='--')
plt.tight_layout()
plt.show()

# --- Print comparison table ---
print("\nMatch Rate Comparison Table:")
print(f"{'Spec':<10} {'RNN Paper':>12} {'RNN Model':>12} {'DNN Paper':>12} {'DNN Model':>12}")
for i, spec in enumerate(specs):
    print(f"{spec:<10} {match_rate_rnn_paper[i]:>12.6f} {match_rate_rnn_pred[i]:>12.6f} "
          f"{match_rate_dnn_paper[i]:>12.6f} {match_rate_dnn_pred[i]:>12.6f}")
print(f"{'Average':<10} {np.mean(match_rate_rnn_paper):>12.6f} {average_rnn_pred:>12.6f} "
      f"{np.mean(match_rate_dnn_paper):>12.6f} {average_dnn_pred:>12.6f}")
