Code

The document outlines a machine learning framework involving data preprocessing, neural network architectures, training, evaluation metrics, and main execution. It includes code for simulating CSI datasets, processing methods, normalization, and mock implementations of CNN and PowerFNN models using NumPy. The training framework simulates model training and evaluation, showcasing validation accuracy over epochs and visualizing processed data.

1. Data Preprocessing & Feature Engineering

import numpy as np
import matplotlib.pyplot as plt

# Simulate datasets: complex-valued CSI tensors of shape (samples, antennas, 30, 30)
np.random.seed(0)
train_CSI = np.random.randn(100, 3, 30, 30) + 1j * np.random.randn(100, 3, 30, 30)
train_label = np.random.randint(0, 10, size=(100,))
valid_CSI = np.random.randn(20, 3, 30, 30) + 1j * np.random.randn(20, 3, 30, 30)
valid_label = np.random.randint(0, 10, size=(20,))

# Processing methods
def process_data(csi, method='modulus'):
    if method == 'modulus':
        return np.abs(csi)[..., np.newaxis]  # add channel dim
    elif method == 'power':
        return np.sum(np.abs(csi) ** 2, axis=(2, 3))[..., np.newaxis]
    elif method == 'time_domain':
        return np.abs(np.fft.ifft(csi, axis=3))[..., np.newaxis]
    else:
        raise ValueError("Invalid method")

# Min-max normalization over the whole array
def normalize(data):
    return (data - data.min()) / (data.max() - data.min())

# Example usage
train_modulus = normalize(process_data(train_CSI, 'modulus'))

# Visualization of a sample (index the trailing channel dim explicitly,
# since imshow rejects arrays of shape (30, 30, 1))
plt.imshow(train_modulus[0, 0, :, :, 0], cmap='viridis')
plt.title('Sample Modulus Processed Data')
plt.colorbar()
plt.show()

Output: a 30x30 heatmap titled 'Sample Modulus Processed Data' with a colorbar.
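For a quick sanity check of what each processing mode returns, the shapes can be printed directly (a minimal sketch using the arrays defined above; the shapes follow from the array definitions):

print(process_data(train_CSI, 'modulus').shape)      # (100, 3, 30, 30, 1)
print(process_data(train_CSI, 'power').shape)        # (100, 3, 1) - summed over the 30x30 grid
print(process_data(train_CSI, 'time_domain').shape)  # (100, 3, 30, 30, 1)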

2. Neural Network Architectures

import numpy as np
import matplotlib.pyplot as plt

# Simulate datasets (repeated from Section 1 so this script runs standalone;
# the labels are not used in this section)
np.random.seed(0)
train_CSI = np.random.randn(100, 3, 30, 30) + 1j * np.random.randn(100, 3, 30, 30)
train_label = np.random.randint(0, 10, size=(100,))
valid_CSI = np.random.randn(20, 3, 30, 30) + 1j * np.random.randn(20, 3, 30, 30)
valid_label = np.random.randint(0, 10, size=(20,))

# Processing methods
def process_data(csi, method='modulus'):
    if method == 'modulus':
        return np.abs(csi)[..., np.newaxis]  # add channel dim
    elif method == 'power':
        return np.sum(np.abs(csi) ** 2, axis=(2, 3))[..., np.newaxis]
    elif method == 'time_domain':
        return np.abs(np.fft.ifft(csi, axis=3))[..., np.newaxis]
    else:
        raise ValueError("Invalid method")

# Normalization
def normalize(data):
    return (data - data.min()) / (data.max() - data.min())

# Example usage
train_modulus = normalize(process_data(train_CSI, 'modulus'))

# Visualization of a sample
plt.imshow(train_modulus[0, 0, :, :, 0], cmap='viridis')
plt.title('Sample Modulus Processed Data')
plt.colorbar()
plt.show()

# Mock versions of CNN and PowerFNN models using NumPy
class CNN_numpy:
    def __init__(self):
        pass

    def forward(self, x):
        # Fake "convolution + pooling + flattening"
        x = x.mean(axis=(2, 3))        # simulate pooling
        x = x.reshape(x.shape[0], -1)  # flatten
        x = np.maximum(0, x @ np.random.randn(x.shape[1], 128))  # random weights, ReLU
        x = np.maximum(0, x @ np.random.randn(128, 2))
        return x

class PowerFNN_numpy:
    def __init__(self):
        pass

    def forward(self, x):
        x = np.maximum(0, x @ np.random.randn(1, 64))
        x = x @ np.random.randn(64, 2)
        return x

# Example usage of the mock models
cnn_model = CNN_numpy()
powerfnn_model = PowerFNN_numpy()

# Simulate input for CNN (batch size 10, 1 channel, 30x30)
cnn_input = np.random.randn(10, 1, 30, 30)
cnn_output = cnn_model.forward(cnn_input)
print("CNN output shape:", cnn_output.shape)

# Simulate input for PowerFNN (batch size 10, 1 feature)
powerfnn_input = np.random.randn(10, 1)
powerfnn_output = powerfnn_model.forward(powerfnn_input)
print("PowerFNN output shape:", powerfnn_output.shape)

Output:

CNN output shape: (10, 2)

PowerFNN output shape: (10, 2)
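One caveat: both mocks draw fresh random weights on every forward call, so two calls on the same input produce different outputs. A minimal variant that fixes the weights once at construction (an assumption about the intent, not part of the original code) would look like:

class CNN_numpy_fixed:
    def __init__(self, in_features=3, seed=0):
        # in_features is the flattened size after pooling
        # (3 for the CSI batches above, 1 for the single-channel cnn_input)
        rng = np.random.default_rng(seed)
        self.W1 = rng.standard_normal((in_features, 128))
        self.W2 = rng.standard_normal((128, 2))

    def forward(self, x):
        x = x.mean(axis=(2, 3)).reshape(x.shape[0], -1)
        x = np.maximum(0, x @ self.W1)
        return x @ self.W2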


3. Training Framework

import numpy as np
import matplotlib.pyplot as plt

# Simulate datasets (binary labels here, matching the two-class output of the mock models)
np.random.seed(0)
train_CSI = np.random.randn(100, 3, 30, 30) + 1j * np.random.randn(100, 3, 30, 30)
train_label = np.random.randint(0, 2, size=(100,))
valid_CSI = np.random.randn(20, 3, 30, 30) + 1j * np.random.randn(20, 3, 30, 30)
valid_label = np.random.randint(0, 2, size=(20,))

# Processing methods
def process_data(csi, method='modulus'):
    if method == 'modulus':
        return np.abs(csi)[..., np.newaxis]  # add channel dim
    elif method == 'power':
        return np.sum(np.abs(csi) ** 2, axis=(2, 3))[..., np.newaxis]
    elif method == 'time_domain':
        return np.abs(np.fft.ifft(csi, axis=3))[..., np.newaxis]
    else:
        raise ValueError("Invalid method")

# Normalization
def normalize(data):
    return (data - data.min()) / (data.max() - data.min())

# Example usage
train_modulus = normalize(process_data(train_CSI, 'modulus'))

# Visualization of a sample
plt.imshow(train_modulus[0, 0, :, :, 0], cmap='viridis')
plt.title('Sample Modulus Processed Data')
plt.colorbar()
plt.show()

# Mock versions of CNN and PowerFNN models using NumPy
class CNN_numpy:
    def __init__(self):
        pass

    def forward(self, x):
        x = x.mean(axis=(2, 3))        # simulate pooling
        x = x.reshape(x.shape[0], -1)  # flatten
        x = np.maximum(0, x @ np.random.randn(x.shape[1], 128))
        x = np.maximum(0, x @ np.random.randn(128, 2))
        return x

class PowerFNN_numpy:
    def __init__(self):
        pass

    def forward(self, x):
        x = np.maximum(0, x @ np.random.randn(1, 64))
        x = x @ np.random.randn(64, 2)
        return x

# Simulate data loaders (batches)
def create_batches(X, y, batch_size=16):
    for i in range(0, len(X), batch_size):
        yield X[i:i + batch_size], y[i:i + batch_size]

# NumPy version of train_model
def train_model_numpy(model, train_X, train_y, valid_X, valid_y, epochs=15, batch_size=16):
    best_acc = 0
    for epoch in range(epochs):
        # Training step (simulated): the loss is computed but never used,
        # since there is no optimizer and no weight update
        for inputs, labels in create_batches(train_X, train_y, batch_size):
            outputs = model.forward(inputs)
            one_hot = np.eye(2)[labels]  # simple mean-squared-error loss
            loss = np.mean((outputs - one_hot) ** 2)

        # Validation step
        correct, total = 0, 0
        for inputs, labels in create_batches(valid_X, valid_y, batch_size):
            outputs = model.forward(inputs)
            preds = np.argmax(outputs, axis=1)
            correct += np.sum(preds == labels)
            total += len(labels)
        acc = 100 * correct / total
        print(f"Epoch {epoch+1}/{epochs} | Val Acc: {acc:.2f}%")
        if acc > best_acc:
            best_acc = acc

    print(f"Best Validation Accuracy: {best_acc:.2f}%")
    return best_acc

# Example usage of the mock models
cnn_model = CNN_numpy()
powerfnn_model = PowerFNN_numpy()

# Simulate input for CNN (batch size 10, 1 channel, 30x30)
cnn_input = np.random.randn(10, 1, 30, 30)
cnn_output = cnn_model.forward(cnn_input)
print("CNN output shape:", cnn_output.shape)

# Simulate input for PowerFNN (batch size 10, 1 feature)
powerfnn_input = np.random.randn(10, 1)
powerfnn_output = powerfnn_model.forward(powerfnn_input)
print("PowerFNN output shape:", powerfnn_output.shape)

# Example training: first 80 samples for training, last 20 for validation
print("\nTraining CNN_numpy model...")
train_model_numpy(cnn_model, train_modulus[:80], train_label[:80],
                  train_modulus[80:], train_label[80:], epochs=5)

Output:
CNN output shape: (10, 2)

PowerFNN output shape: (10, 2)

Training CNN_numpy model...

Epoch 1/5 | Val Acc: 65.00%

Epoch 2/5 | Val Acc: 45.00%

Epoch 3/5 | Val Acc: 40.00%

Epoch 4/5 | Val Acc: 35.00%

Epoch 5/5 | Val Acc: 45.00%

Best Validation Accuracy: 65.00%
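The validation accuracy above fluctuates around chance level because the mock model redraws its weights on every forward call and nothing is actually learned; the recorded numbers are noise, not a learning curve. Separately, create_batches always iterates in the same fixed order, so every epoch sees identical batches. A shuffled variant (an addition, not in the original code) is a common refinement:

def create_batches_shuffled(X, y, batch_size=16, rng=None):
    if rng is None:
        rng = np.random.default_rng()
    idx = rng.permutation(len(X))  # new sample order on each call
    for i in range(0, len(X), batch_size):
        sel = idx[i:i + batch_size]
        yield X[sel], y[sel]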

4. Evaluation Metrics

import numpy as np
import matplotlib.pyplot as plt

# Simulate datasets (repeated from Section 3 so this script runs standalone)
np.random.seed(0)
train_CSI = np.random.randn(100, 3, 30, 30) + 1j * np.random.randn(100, 3, 30, 30)
train_label = np.random.randint(0, 2, size=(100,))
valid_CSI = np.random.randn(20, 3, 30, 30) + 1j * np.random.randn(20, 3, 30, 30)
valid_label = np.random.randint(0, 2, size=(20,))

# Processing methods
def process_data(csi, method='modulus'):
    if method == 'modulus':
        return np.abs(csi)[..., np.newaxis]  # add channel dim
    elif method == 'power':
        return np.sum(np.abs(csi) ** 2, axis=(2, 3))[..., np.newaxis]
    elif method == 'time_domain':
        return np.abs(np.fft.ifft(csi, axis=3))[..., np.newaxis]
    else:
        raise ValueError("Invalid method")

# Normalization
def normalize(data):
    return (data - data.min()) / (data.max() - data.min())

# Example usage
train_modulus = normalize(process_data(train_CSI, 'modulus'))

# Visualization of a sample
plt.imshow(train_modulus[0, 0, :, :, 0], cmap='viridis')
plt.title('Sample Modulus Processed Data')
plt.colorbar()
plt.show()

# Mock versions of CNN and PowerFNN models using NumPy
class CNN_numpy:
    def __init__(self):
        pass

    def forward(self, x):
        x = x.mean(axis=(2, 3))        # simulate pooling
        x = x.reshape(x.shape[0], -1)  # flatten
        x = np.maximum(0, x @ np.random.randn(x.shape[1], 128))
        x = np.maximum(0, x @ np.random.randn(128, 2))
        return x

class PowerFNN_numpy:
    def __init__(self):
        pass

    def forward(self, x):
        x = np.maximum(0, x @ np.random.randn(1, 64))
        x = x @ np.random.randn(64, 2)
        return x

# Simulate data loaders (batches)
def create_batches(X, y, batch_size=16):
    for i in range(0, len(X), batch_size):
        yield X[i:i + batch_size], y[i:i + batch_size]

# NumPy version of train_model
def train_model_numpy(model, train_X, train_y, valid_X, valid_y, epochs=15, batch_size=16):
    best_acc = 0
    for epoch in range(epochs):
        # Training step (simulated): the loss is computed but never used,
        # since there is no optimizer and no weight update
        for inputs, labels in create_batches(train_X, train_y, batch_size):
            outputs = model.forward(inputs)
            one_hot = np.eye(2)[labels]  # simple mean-squared-error loss
            loss = np.mean((outputs - one_hot) ** 2)

        # Validation step
        correct, total = 0, 0
        for inputs, labels in create_batches(valid_X, valid_y, batch_size):
            outputs = model.forward(inputs)
            preds = np.argmax(outputs, axis=1)
            correct += np.sum(preds == labels)
            total += len(labels)
        acc = 100 * correct / total
        print(f"Epoch {epoch+1}/{epochs} | Val Acc: {acc:.2f}%")
        if acc > best_acc:
            best_acc = acc

    print(f"Best Validation Accuracy: {best_acc:.2f}%")
    return best_acc

# Example usage of the mock models
cnn_model = CNN_numpy()
powerfnn_model = PowerFNN_numpy()

# Simulate input for CNN (batch size 10, 1 channel, 30x30)
cnn_input = np.random.randn(10, 1, 30, 30)
cnn_output = cnn_model.forward(cnn_input)
print("CNN output shape:", cnn_output.shape)

# Simulate input for PowerFNN (batch size 10, 1 feature)
powerfnn_input = np.random.randn(10, 1)
powerfnn_output = powerfnn_model.forward(powerfnn_input)
print("PowerFNN output shape:", powerfnn_output.shape)

# Example training
print("\nTraining CNN_numpy model...")
train_model_numpy(cnn_model, train_modulus[:80], train_label[:80],
                  train_modulus[80:], train_label[80:], epochs=5)

# Plot CDF and CCDF for signal power
def plot_cdf_ccdf(power_los, power_nlos):
    plt.figure(figsize=(10, 6))

    # Empirical CDF for LoS: fraction of samples <= each sorted value
    los_sorted = np.sort(power_los)
    los_probs = np.arange(1, len(los_sorted) + 1) / len(los_sorted)
    plt.plot(los_sorted, los_probs, label='LoS CDF')

    # Empirical CCDF for NLoS: 1 - CDF, the fraction of samples above each value
    nlos_sorted = np.sort(power_nlos)
    nlos_probs = 1 - np.arange(1, len(nlos_sorted) + 1) / len(nlos_sorted)
    plt.plot(nlos_sorted, nlos_probs, label='NLoS CCDF')

    plt.xlabel('Power')
    plt.ylabel('Probability')
    plt.title('CDF/CCDF of Signal Power')
    plt.legend()
    plt.grid()
    plt.savefig('power_distribution.png')
    plt.show()

# Example usage for CDF/CCDF plotting
power_los = np.random.randn(100) ** 2
power_nlos = np.random.randn(100) ** 2
plot_cdf_ccdf(power_los, power_nlos)
Output (the script also displays the sample heatmap and the CDF/CCDF plot, and saves power_distribution.png):

CNN output shape: (10, 2)

PowerFNN output shape: (10, 2)

Training CNN_numpy model...

Epoch 1/5 | Val Acc: 65.00%

Epoch 2/5 | Val Acc: 45.00%

Epoch 3/5 | Val Acc: 40.00%

Epoch 4/5 | Val Acc: 35.00%

Epoch 5/5 | Val Acc: 45.00%

Best Validation Accuracy: 65.00%
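The empirical-distribution logic inside plot_cdf_ccdf generalizes to a small reusable helper (a sketch mirroring the code above, not part of the original script):

def empirical_cdf(samples):
    s = np.sort(samples)
    p = np.arange(1, len(s) + 1) / len(s)  # p[i] = fraction of samples <= s[i]
    return s, p

s, cdf = empirical_cdf(power_los)
ccdf = 1 - cdf  # fraction of samples above each value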


5. Main Execution

import numpy as np
import matplotlib.pyplot as plt

# Simulate datasets
np.random.seed(0)
train_CSI = np.random.randn(100, 3, 30, 30) + 1j * np.random.randn(100, 3, 30, 30)
train_label = np.random.randint(0, 2, size=(100,))
valid_CSI = np.random.randn(20, 3, 30, 30) + 1j * np.random.randn(20, 3, 30, 30)
valid_label = np.random.randint(0, 2, size=(20,))

# Processing methods
def process_data(csi, method='modulus'):
    if method == 'modulus':
        return np.abs(csi)[..., np.newaxis]  # add channel dim
    elif method == 'power':
        return np.sum(np.abs(csi) ** 2, axis=(2, 3))[..., np.newaxis]
    elif method == 'time_domain':
        return np.abs(np.fft.ifft(csi, axis=3))[..., np.newaxis]
    else:
        raise ValueError("Invalid method")

# Normalization
def normalize(data):
    return (data - data.min()) / (data.max() - data.min())

# Example usage: both modulus and power features, for train and validation sets
train_modulus = normalize(process_data(train_CSI, 'modulus'))
valid_modulus = normalize(process_data(valid_CSI, 'modulus'))
train_power = normalize(process_data(train_CSI, 'power'))
valid_power = normalize(process_data(valid_CSI, 'power'))

# Visualization of a sample
plt.imshow(train_modulus[0, 0, :, :, 0], cmap='viridis')
plt.title('Sample Modulus Processed Data')
plt.colorbar()
plt.show()

# Mock versions of CNN and PowerFNN models using NumPy
class CNN_numpy:
    def __init__(self):
        pass

    def forward(self, x):
        x = x.mean(axis=(2, 3))        # simulate pooling
        x = x.reshape(x.shape[0], -1)  # flatten
        x = np.maximum(0, x @ np.random.randn(x.shape[1], 128))
        x = np.maximum(0, x @ np.random.randn(128, 2))
        return x

class PowerFNN_numpy:
    def __init__(self):
        pass

    def forward(self, x):
        x = np.maximum(0, x @ np.random.randn(1, 64))
        x = x @ np.random.randn(64, 2)
        return x

# Simulate data loaders (batches)
def create_batches(X, y, batch_size=16):
    for i in range(0, len(X), batch_size):
        yield X[i:i + batch_size], y[i:i + batch_size]

# NumPy version of train_model
def train_model_numpy(model, train_X, train_y, valid_X, valid_y, epochs=15, batch_size=16):
    best_acc = 0
    for epoch in range(epochs):
        # Training step (simulated): loss is computed but no weights are updated
        for inputs, labels in create_batches(train_X, train_y, batch_size):
            outputs = model.forward(inputs)
            one_hot = np.eye(2)[labels]
            loss = np.mean((outputs - one_hot) ** 2)

        # Validation step
        correct, total = 0, 0
        for inputs, labels in create_batches(valid_X, valid_y, batch_size):
            outputs = model.forward(inputs)
            preds = np.argmax(outputs, axis=1)
            correct += np.sum(preds == labels)
            total += len(labels)
        acc = 100 * correct / total
        print(f"Epoch {epoch+1}/{epochs} | Val Acc: {acc:.2f}%")
        if acc > best_acc:
            best_acc = acc

    print(f"Best Validation Accuracy: {best_acc:.2f}%")
    return best_acc

# Hyperparameters
BATCH_SIZE = 64
EPOCHS = 15

# Example training for the modulus-based CNN
cnn_model = CNN_numpy()
print("\nTraining CNN_numpy model...")
train_model_numpy(cnn_model, train_modulus[:80], train_label[:80],
                  train_modulus[80:], train_label[80:],
                  epochs=EPOCHS, batch_size=BATCH_SIZE)

# Plot CDF and CCDF for signal power
def plot_cdf_ccdf(power_los, power_nlos):
    plt.figure(figsize=(10, 6))

    los_sorted = np.sort(power_los)
    los_probs = np.arange(1, len(los_sorted) + 1) / len(los_sorted)
    plt.plot(los_sorted, los_probs, label='LoS CDF')

    nlos_sorted = np.sort(power_nlos)
    nlos_probs = 1 - np.arange(1, len(nlos_sorted) + 1) / len(nlos_sorted)
    plt.plot(nlos_sorted, nlos_probs, label='NLoS CCDF')

    plt.xlabel('Power')
    plt.ylabel('Probability')
    plt.title('CDF/CCDF of Signal Power')
    plt.legend()
    plt.grid()
    plt.savefig('power_distribution.png')
    plt.show()

# Power threshold baseline: per-antenna power of LoS (label 0) vs NLoS (label 1) samples
power_los = train_power[train_label == 0].flatten()
power_nlos = train_power[train_label == 1].flatten()
plot_cdf_ccdf(power_los, power_nlos)

# Find the optimal threshold on the validation set
valid_power_norm = valid_power.flatten()
valid_label_repeated = np.repeat(valid_label, valid_power.shape[1])  # one label per antenna
thresholds = np.linspace(0, 1, 100)
accuracies = []
for th in thresholds:
    preds = (valid_power_norm >= th).astype(int)
    acc = np.mean(preds == valid_label_repeated)
    accuracies.append(acc)
best_th = thresholds[np.argmax(accuracies)]
print(f'Optimal Threshold: {best_th:.3f}, Accuracy: {max(accuracies)*100:.1f}%')
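As an aside to the brute-force sweep above (not part of the original script): if scikit-learn is available, roc_curve evaluates every distinct score as a candidate threshold, and Youden's J statistic picks the point that best separates the classes. A sketch under that assumption:

from sklearn.metrics import roc_curve

fpr, tpr, ths = roc_curve(valid_label_repeated, valid_power_norm)
best = np.argmax(tpr - fpr)  # Youden's J: maximize TPR - FPR
print(f"ROC-optimal threshold: {ths[best]:.3f}")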

Output:
