Performance testing

import os
import shutil

import pandas as pd


def utility(data_path='train', csv_path='trainLabels.csv'):
    # Read the label CSV (image id -> label) and list the images to be sorted
    data_df = pd.read_csv(csv_path, header=0)
    image_names = os.listdir(data_path)

    # Per-label counter used to build unique file names inside each label folder
    labels_dict = {}
    for image_name in image_names:
        # The file name without its extension is the 1-based image id
        image_id = int(os.path.splitext(image_name)[0])
        image_label = data_df.iloc[image_id - 1, 1]

        label_path = os.path.join(data_path, image_label)

        # Create the label folder on first encounter and start its counter at 0
        if not os.path.exists(label_path):
            os.makedirs(label_path)
        labels_dict.setdefault(image_label, 0)

        image_name_updated = image_label + str(labels_dict[image_label]) + '.jpg'

        shutil.move(os.path.join(data_path, image_name),
                    os.path.join(label_path, image_name_updated))
        labels_dict[image_label] += 1


if __name__ == '__main__':
    utility()

This script organizes image data by moving images into folders based on their corresponding labels. It
reads labels from a CSV file and matches them with image filenames in a specified directory. Each image
is moved into a subfolder named after its label. If the label folder doesn't exist, it creates one. If multiple
images have the same label, a counter is used to differentiate their filenames within the label folder. This
process ensures that images are structured in a way that facilitates training and validation of machine
learning models. The `utility()` function takes the path to the directory containing the images
(`data_path`) and the path to the CSV file containing image labels (`csv_path`) as input parameters. When
executed as the main script, it organizes the image data accordingly.
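
For orientation, a minimal usage sketch follows; the directory layout and label names are assumptions used only for illustration and are not taken from the source.

# Minimal usage sketch; the layout below is an assumption for illustration.
#
# Before:                        After utility('train', 'trainLabels.csv'):
#   train/1.png                    train/airplane/airplane0.jpg
#   train/2.png                    train/cat/cat0.jpg
#   train/3.png                    train/cat/cat1.jpg
#   trainLabels.csv (id, label)
utility(data_path='train', csv_path='trainLabels.csv')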

from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext

ext_modules = [
    Extension("cifar_utility", ["cifar_utility.py"]),
    Extension("color", ["color.py"]),
    Extension("image_directory_folder_change", ["image_directory_folder_change.py"]),
    Extension("logger", ["logger.py"]),
    Extension("utility", ["utility.py"]),
    # ... all your modules that need to be compiled ...
]

setup(
    name='My Program Name',
    cmdclass={'build_ext': build_ext},
    ext_modules=ext_modules,
)

This setup script is used to compile multiple Python modules into C extension modules using Cython. It
imports necessary modules from `distutils` and `Cython.Distutils`, defines a list of extension modules
along with their corresponding Python source files, and then sets up the compilation process. Each
extension module is defined by an `Extension` object where the first argument is the name of the module
and the second argument is the list of source files. Finally, the `setup()` function is called with parameters
specifying the program name, the command class for building extensions (`build_ext`), and the list of
extension modules to be compiled. When executed, this script compiles the specified Python modules into
shared libraries for improved performance.
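
As a usage sketch, the build is typically driven through the standard `build_ext` command; the snippet below assumes the script above is saved as `setup.py` next to the listed modules and that Cython and a C compiler are installed.

# Hypothetical driver that runs "python setup.py build_ext --inplace" and then
# imports one of the compiled modules; paths and module names are assumptions.
import subprocess
import sys

subprocess.run([sys.executable, 'setup.py', 'build_ext', '--inplace'], check=True)

# After a successful build, the compiled extension (e.g. utility.*.so or .pyd)
# shadows the .py source on import.
import utility
print(utility.__file__)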

import os
import shutil


def image_directory_folder_change(folder_path, output_path):
    # Collect only regular files from the source directory
    images = [f for f in os.listdir(folder_path)
              if os.path.isfile(os.path.join(folder_path, f))]

    for image in images:
        # The target folder name is the part of the file name before the first underscore
        folder_name = image.split('_')[0]

        # Create the per-name folder under the output directory if it is missing
        new_path = os.path.join(output_path, folder_name)
        os.makedirs(new_path, exist_ok=True)

        old_image_path = os.path.join(folder_path, image)
        new_image_path = os.path.join(new_path, image)
        shutil.move(old_image_path, new_image_path)

This Python function, `image_directory_folder_change`, takes two arguments: `folder_path`, which is the
path to the directory containing images, and `output_path`, which is the path to the directory where the
images will be moved after restructuring. It iterates over all files in the `folder_path` directory, extracts
the folder name from each image filename (assuming the folder name is before the first underscore in the
filename), creates a new directory with that folder name in the `output_path` directory, and moves each
image to its corresponding folder in the `output_path`. If the folder already exists, it will skip creating a
new one. This function essentially restructures the directory by moving images into subdirectories based
on their names.
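
A short, hypothetical call illustrates the intended behaviour; the paths and file names below are placeholders, not values from the source.

# Hypothetical example: files named "<label>_<index>.png" are sorted into
# per-label folders under the output directory.
#
# Before: raw_images/cat_001.png, raw_images/dog_001.png
# After:  sorted_images/cat/cat_001.png, sorted_images/dog/dog_001.png
image_directory_folder_change(folder_path='raw_images', output_path='sorted_images')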

import distutils.dir_util
import datetime
import os
import shutil
import time
import logging
import mlflow
import cv2
import numpy as np
from tensorflow.keras.preprocessing import image

# separator symbol
sep = "/"

class bcolors:
    OK = '\033[92m'
    WARNING = '\033[93m'    # yellow
    FAIL = '\033[91m'       # red
    RESET = '\033[0m'       # reset color
    green = '\033[0;32;47m'
    PURPLE = '\033[95m'
    CYAN = '\033[96m'
    DARKCYAN = '\033[36m'
    BLUE = '\033[94m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    END = '\033[0m'

# Utility function to load data
def load_image(img_path, img_shape):
    img = image.load_img(img_path)
    # normalise the image to the [0, 1] range
    # img = np.array(img) / 255.0
    img = cv2.normalize(np.array(img), dst=None, alpha=0, beta=1,
                        norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_64F)
    img = cv2.resize(img, img_shape)
    return img

def mlflow_logging(experiment_name, metrics=[], params=[], artifacts=[], tags={}):
    # Create the experiment if it does not exist yet, then make it the active one
    try:
        mlflow.create_experiment(name=experiment_name)
    except Exception:
        pass
    mlflow.set_experiment(experiment_name)

    with mlflow.start_run(run_name=experiment_name, nested=True):
        for artifact in artifacts:
            mlflow.log_artifact(artifact)

        # Each entry in 'metrics' and 'params' is expected to be a dict
        for metric in metrics:
            mlflow.log_metrics(metric)

        for key, val in tags.items():
            mlflow.set_tag(key, val)

        for param in params:
            mlflow.log_params(param)

def remove_previous_result():
    print('Removing previous results if any..')

    try:
        # Archive the previous Results directory under PreviousResults/<date>/<time>
        if os.path.isdir('./Results'):
            date_time = str(datetime.datetime.now()).split(' ')
            time_parts = date_time[1].split('.')[0].split(':')
            time_path = os.path.join(date_time[0],
                                     time_parts[0] + '-' + time_parts[1] + '-' + time_parts[2])

            distutils.dir_util.mkpath(os.path.join('./PreviousResults', time_path), verbose=True)
            shutil.move('./Results', os.path.join('./PreviousResults', time_path))
            print('"Results" folder is successfully moved')
    except Exception as e:
        print(e.__class__)
        print('The directory named "Results" is not found')

    try:
        # Removing any leftover Results directory
        shutil.rmtree('./Results')
        print('"Results" folder is successfully removed')
    except Exception as e:
        print(e.__class__)
        print('The directory named "Results" is not found')

    try:
        # Removing the mlruns directory
        shutil.rmtree('./mlruns')
        print('"mlruns" folder is successfully removed')
    except Exception as e:
        print(e.__class__)
        print('The directory named "mlruns" is not found')

    try:
        # Removing the mlruns database
        os.remove('./mlruns.db')
        print('"mlruns.db" is successfully removed')
    except Exception as e:
        print(e.__class__)
        print('The file named "mlruns.db" is not found')
    return None

def logger_setup(name, log_file_name, level=logging.DEBUG):
    os.makedirs('logging', exist_ok=True)
    formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s:%(filename)s:%(module)s')
    handler = logging.FileHandler(os.path.join('logging', log_file_name))
    handler.setFormatter(formatter)
    logger = logging.getLogger(name)
    logger.setLevel(level)
    logger.addHandler(handler)
    return logger

if __name__ == "__main__":
    remove_previous_result()

This script contains several utility functions for common tasks such as loading images, logging, MLflow
integration, and result directory management. The `bcolors` class defines color codes for console output.
The `load_image` function loads an image from a file path and normalizes it. The `mlflow_logging`
function sets up an MLflow experiment, logs metrics, parameters, artifacts, and tags for the run. The
`remove_previous_result` function removes previous result directories and MLflow logs. The
`logger_setup` function sets up logging to a file with the specified name and level. When run as the
main script, the module calls `remove_previous_result()` to clear previous result directories and MLflow logs.
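
A hedged usage sketch of these helpers is shown below; the metric, parameter, and artifact values are placeholders, and `sample.jpg` is assumed to exist.

# Hypothetical usage of the utilities above; all values are placeholders.
logger = logger_setup(name='training', log_file_name='training.log')
logger.info('Starting experiment')

img = load_image('sample.jpg', img_shape=(224, 224))   # assumes sample.jpg exists
logger.info('Loaded image with shape %s', img.shape)

mlflow_logging(
    experiment_name='demo_experiment',
    metrics=[{'accuracy': 0.91}],     # each entry is a dict passed to mlflow.log_metrics
    params=[{'batch_size': 32}],      # each entry is a dict passed to mlflow.log_params
    artifacts=['./Results'],          # logged as an MLflow artifact (must exist)
    tags={'stage': 'smoke-test'},
)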

Attack vector executor:

import license_execute as le

license_status = le.check()
if license_status == 'Valid@tAi':
    # after installing the necessary packages, import both art_evasion_whitebox
    # and art_evasion_blackbox
    import logging
    import os
    import pickle
    import sys
    from configparser import ConfigParser, ExtendedInterpolation

    import h5py
    import matplotlib.pyplot as plt
    import mlflow
    import numpy as np
    import tensorflow as tf
    import tensorflow.keras

    from AttackVectors import art_evasion_blackbox
    from AttackVectors import art_evasion_whitebox
    from Utility import utility
    # Loading all the config
    from Utility.utility import sep

    # Use dark background
    plt.style.use('dark_background')
    # Remove warnings
    plt.rcParams.update({'figure.max_open_warning': 0})

    parser = ConfigParser(interpolation=ExtendedInterpolation())
    parser.read('./Configs/config_art.properties')  # Path of the config file

    # mlflow
    experiment_name = parser.get('mlflow', 'experiment_name')
    run_no = parser.getint('mlflow', 'run_no')

    # parameters
    model_path = parser.get('parameters', 'model')
    x_test_path = parser.get('parameters', 'x_test')
    y_test_path = parser.get('parameters', 'y_test')
    class_name_to_label_dict_path = parser.get('parameters', 'class_name_to_label_dict')
    classifier_type = parser.get('parameters', 'classifier_type')
    save_path = parser.get('parameters', 'save_path')
    n_sample = parser.getint('parameters', 'n_sample', fallback=32)

    def art_pipeline_execution():
        # setting up the MLflow experiment
        # try:
        #     mlflow.create_experiment(experiment_name)
        # except:
        #     pass
        # mlflow.set_tracking_uri("sqlite:///mlruns.db")
        # mlflow.set_experiment(experiment_name)
        # with mlflow.start_run(run_name=experiment_name) as run:
        #     exp_id = run.info.experiment_id
        #     run_id = run.info.run_uuid

        # loading the model
        model = tensorflow.keras.models.load_model(model_path)

        # loading the test dataset
        with h5py.File(x_test_path, 'r') as hf:
            x_test = hf['X_test'][:]
        with h5py.File(y_test_path, 'r') as hf:
            y_test = hf['y_test'][:]

        with open(class_name_to_label_dict_path, 'rb') as f:
            class_name_to_label_dict = pickle.load(f)

        # choosing indices for a random subset of 'n_sample' test points
        try:
            random_n_sample_idx = np.random.choice(x_test.shape[0], size=n_sample)
            x_test = np.array(x_test[random_n_sample_idx])
            y_test = y_test[random_n_sample_idx]
        except Exception:
            x_test = np.array(x_test)[:]
            y_test = y_test[:]

        art_corner_cases_x_test = list()
        art_corner_cases_y_test = list()
        art_corner_cases_y_test_pred = list()

        def _collect(corner_cases):
            # Accumulate adversarial examples, their predicted labels and true labels
            art_corner_cases_x_test.extend(corner_cases[0])
            art_corner_cases_y_test_pred.extend(corner_cases[1])
            art_corner_cases_y_test.extend(corner_cases[2])

        # Black-box implementation
        print("ART black box testing...")
        if parser.getboolean('parameters', 'black_box_execution'):
            if parser.getboolean('HSJ', 'set_execution'):
                print("x_test", x_test.shape, y_test.shape, save_path)
                print("save_path", save_path)
                # saving corner cases of HSJ
                _collect(art_evasion_blackbox.ArtEvasionBlackbox(
                    model, x_test, y_test, save_path,
                    classifier_type=classifier_type).HSJ())

            if parser.getboolean('PA', 'set_execution'):
                # saving corner cases of PA
                _collect(art_evasion_blackbox.ArtEvasionBlackbox(
                    model, x_test, y_test, save_path,
                    classifier_type=classifier_type).PA())

            if parser.getboolean('ZOO', 'set_execution'):
                # saving corner cases of ZOO
                _collect(art_evasion_blackbox.ArtEvasionBlackbox(
                    model, x_test, y_test, save_path,
                    classifier_type=classifier_type).ZOO())

            if parser.getboolean('ST', 'set_execution'):
                # saving corner cases of ST
                _collect(art_evasion_blackbox.ArtEvasionBlackbox(
                    model, x_test, y_test, save_path,
                    classifier_type=classifier_type).ST())

            if parser.getboolean('DBB', 'set_execution'):
                # saving corner cases of DBB
                _collect(art_evasion_blackbox.ArtEvasionBlackbox(
                    model, x_test, y_test, save_path,
                    classifier_type=classifier_type).DBB(
                        delta=parser.getfloat('DBB', 'delta'),
                        epsilon=parser.getfloat('DBB', 'epsilon')))

        print("ART black box testing complete.")

        # White-box implementation
        print("ART white box testing...")
        if parser.getboolean('parameters', 'white_box_execution'):
            if parser.getboolean('CLInf', 'set_execution'):
                _collect(art_evasion_whitebox.ArtEvasionWhitebox(
                    model, x_test, y_test, save_path,
                    classifier_type=classifier_type).CLInf())

            if parser.getboolean('FGSM', 'set_execution'):
                _collect(art_evasion_whitebox.ArtEvasionWhitebox(
                    model, x_test, y_test, save_path,
                    classifier_type=classifier_type).FGSM(parser.getfloat('FGSM', 'epsilon')))

            if parser.getboolean('IFS', 'set_execution'):
                _collect(art_evasion_whitebox.ArtEvasionWhitebox(
                    model, x_test, y_test, save_path,
                    classifier_type=classifier_type).IFS())

            if parser.getboolean('UP', 'set_execution'):
                # 'T' = targeted, 'U' = untargeted universal perturbation
                attack_type = 'T' if parser.getboolean('UP', 'targeted') else 'U'
                _collect(art_evasion_whitebox.ArtEvasionWhitebox(
                    model, x_test, y_test, save_path,
                    classifier_type=classifier_type).UP(
                        type=attack_type,
                        max_iter=parser.getint('UP', 'max_iter'),
                        attacker=parser.get('UP', 'attacker')))

            if parser.getboolean('EN', 'set_execution'):
                _collect(art_evasion_whitebox.ArtEvasionWhitebox(
                    model, x_test, y_test, save_path,
                    classifier_type=classifier_type).EN())

            if parser.getboolean('JSM', 'set_execution'):
                _collect(art_evasion_whitebox.ArtEvasionWhitebox(
                    model, x_test, y_test, save_path,
                    classifier_type=classifier_type).JSM())

            if parser.getboolean('DF', 'set_execution'):
                _collect(art_evasion_whitebox.ArtEvasionWhitebox(
                    model, x_test, y_test, save_path,
                    classifier_type=classifier_type).DF())

            if parser.getboolean('NF', 'set_execution'):
                _collect(art_evasion_whitebox.ArtEvasionWhitebox(
                    model, x_test, y_test, save_path,
                    classifier_type=classifier_type).NF())

        print("ART white box testing complete.")

        # Save the collected corner cases as .npy files
        np.save(sep.join([save_path, 'art_corner_cases_x_test.npy']),
                np.array(art_corner_cases_x_test))
        np.save(sep.join([save_path, 'art_corner_cases_y_test.npy']),
                np.array(art_corner_cases_y_test))
        np.save(sep.join([save_path, 'art_corner_cases_y_test_pred.npy']),
                np.array(art_corner_cases_y_test_pred))

        # Logging important metadata into MLflow
        # mlflow.set_tags(class_name_to_label_dict)
        # mlflow.log_artifact(save_path)
        utility.mlflow_logging(experiment_name='Attack Vectors',
                               artifacts=[save_path],
                               tags=class_name_to_label_dict)
        # print("exp_id: ", exp_id)
        # print("experiment_name", experiment_name)
        # print("run_id: ", run_id)
        return None

    def black_box_execution_only():
        # setting up the MLflow experiment
        try:
            mlflow.create_experiment(experiment_name)
        except Exception:
            pass
        # mlflow.set_tracking_uri("sqlite:///mlruns.db")
        # mlflow.set_experiment(experiment_name)
        # with mlflow.start_run(run_name=experiment_name) as run:
        #     exp_id = run.info.experiment_id
        #     run_id = run.info.run_uuid

        # loading the model
        model = tensorflow.keras.models.load_model(model_path)

        # loading the test dataset
        with h5py.File(x_test_path, 'r') as hf:
            x_test = hf['X_test'][:]
        with h5py.File(y_test_path, 'r') as hf:
            y_test = hf['y_test'][:]

        with open(class_name_to_label_dict_path, 'rb') as f:
            class_name_to_label_dict = pickle.load(f)

        # choosing indices for a random subset of 'n_sample' test points
        try:
            random_n_sample_idx = np.random.choice(x_test.shape[0], size=n_sample)
            x_test = np.array(x_test[random_n_sample_idx])
            y_test = y_test[random_n_sample_idx]
        except Exception:
            x_test = np.array(x_test)[:]
            y_test = y_test[:]

        art_corner_cases_x_test = list()
        art_corner_cases_y_test = list()
        art_corner_cases_y_test_pred = list()

        def _collect(corner_cases):
            # Accumulate adversarial examples, their predicted labels and true labels
            art_corner_cases_x_test.extend(corner_cases[0])
            art_corner_cases_y_test_pred.extend(corner_cases[1])
            art_corner_cases_y_test.extend(corner_cases[2])

        # Black-box implementation (no white-box attacks in this variant)
        print("ART black box testing...")

        if parser.getboolean('HSJ', 'set_execution'):
            print("x_test", x_test.shape, y_test.shape, save_path)
            print("save_path", save_path)
            # saving corner cases of HSJ
            _collect(art_evasion_blackbox.ArtEvasionBlackbox(
                model, x_test, y_test, save_path,
                classifier_type=classifier_type).HSJ())

        if parser.getboolean('PA', 'set_execution'):
            # saving corner cases of PA
            _collect(art_evasion_blackbox.ArtEvasionBlackbox(
                model, x_test, y_test, save_path,
                classifier_type=classifier_type).PA())

        if parser.getboolean('ZOO', 'set_execution'):
            # saving corner cases of ZOO
            _collect(art_evasion_blackbox.ArtEvasionBlackbox(
                model, x_test, y_test, save_path,
                classifier_type=classifier_type).ZOO())

        if parser.getboolean('ST', 'set_execution'):
            # saving corner cases of ST
            _collect(art_evasion_blackbox.ArtEvasionBlackbox(
                model, x_test, y_test, save_path,
                classifier_type=classifier_type).ST())

        if parser.getboolean('DBB', 'set_execution'):
            # saving corner cases of DBB
            _collect(art_evasion_blackbox.ArtEvasionBlackbox(
                model, x_test, y_test, save_path,
                classifier_type=classifier_type).DBB(
                    delta=parser.getfloat('DBB', 'delta'),
                    epsilon=parser.getfloat('DBB', 'epsilon')))

        print("ART black box testing complete.")

        # Save the collected corner cases as .npy files
        np.save(sep.join([save_path, 'art_corner_cases_x_test.npy']),
                np.array(art_corner_cases_x_test))
        np.save(sep.join([save_path, 'art_corner_cases_y_test.npy']),
                np.array(art_corner_cases_y_test))
        np.save(sep.join([save_path, 'art_corner_cases_y_test_pred.npy']),
                np.array(art_corner_cases_y_test_pred))

        # Logging important metadata into MLflow
        # mlflow.set_tags(class_name_to_label_dict)
        # mlflow.log_artifact(save_path)
        utility.mlflow_logging(experiment_name='Attack Vectors',
                               artifacts=[save_path],
                               tags=class_name_to_label_dict)
        # print("exp_id: ", exp_id)
        # print("experiment_name", experiment_name)
        # print("run_id: ", run_id)
        return None

    def black_box_main_script():
        # disabling eager execution for ART execution
        tf.compat.v1.disable_eager_execution()

        # logging directory
        os.makedirs('logging', exist_ok=True)

        logger = logging.getLogger(__name__)
        logger.setLevel(logging.DEBUG)
        formatter = logging.Formatter(
            '%(asctime)s:%(levelname)s:%(message)s:%(filename)s:%(module)s')
        file_handler = logging.FileHandler('logging/art_pipeline_log.log')
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)

        try:
            black_box_execution_only()
            logger.info("ART pipeline successfully executed.")
        except Exception:
            logger.critical('Please fix the modeling pipeline')
            logger.exception('ART pipeline error!')
            sys.exit(1)

        return None

    def main_script():
        # disabling eager execution for ART execution
        tf.compat.v1.disable_eager_execution()

        # logging directory
        os.makedirs('logging', exist_ok=True)

        logger = logging.getLogger(__name__)
        logger.setLevel(logging.DEBUG)
        formatter = logging.Formatter(
            '%(asctime)s:%(levelname)s:%(message)s:%(filename)s:%(module)s')
        file_handler = logging.FileHandler('logging/art_pipeline_log.log')
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)

        try:
            art_pipeline_execution()
            logger.info("ART pipeline successfully executed.")
        except Exception as e:
            logger.critical('Please fix the modeling pipeline')
            logger.exception('ART pipeline error!')
            raise e

        return None

else:
    print("Invalid License")

if __name__ == '__main__':
    main_script()

This script is an Adversarial Robustness Toolbox (ART) pipeline that tests machine learning models
against black-box and white-box attack vectors. It configures attack parameters, loads the model and test
data, executes the attacks, and logs results using MLflow. It also handles errors and logs exceptions. If the
license is valid, it runs the pipeline; otherwise, it terminates.
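
The exact contents of `./Configs/config_art.properties` are not shown in the source; the sketch below is a hypothetical reconstruction based only on the keys the script reads (section names such as `mlflow`, `parameters`, and the per-attack sections come from the `parser` calls above, while every value is a placeholder).

# Hypothetical reconstruction of ./Configs/config_art.properties, written with
# configparser for illustration; all values below are placeholders.
import os
from configparser import ConfigParser

config = ConfigParser()
config['mlflow'] = {'experiment_name': 'art_evasion_demo', 'run_no': '1'}
config['parameters'] = {
    'model': './Results/model.h5',
    'x_test': './Results/x_test.h5',
    'y_test': './Results/y_test.h5',
    'class_name_to_label_dict': './Results/class_name_to_label_dict.pkl',
    'classifier_type': 'keras',
    'save_path': './Results/AttackVectors',
    'n_sample': '32',
    'black_box_execution': 'True',
    'white_box_execution': 'False',
}

# One section per attack; each needs at least 'set_execution', plus the
# attack-specific keys the script reads (DBB, FGSM and UP).
for attack in ['HSJ', 'PA', 'ZOO', 'ST', 'DBB',
               'CLInf', 'FGSM', 'IFS', 'UP', 'EN', 'JSM', 'DF', 'NF']:
    config[attack] = {'set_execution': 'False'}
config['HSJ']['set_execution'] = 'True'
config['DBB'].update({'delta': '0.01', 'epsilon': '0.01'})
config['FGSM']['epsilon'] = '0.1'
config['UP'].update({'targeted': 'False', 'max_iter': '10', 'attacker': 'fgsm'})

os.makedirs('./Configs', exist_ok=True)
with open('./Configs/config_art.properties', 'w') as fh:
    config.write(fh)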

import os

import open_compile
from AttackVectors import AV_compile
from DataDiagnostic import DD_compile
from Deepxplore import DE_compile
from MetamorphicTesting.helper_functions import MT_compile
from ModelExplainability import ME_compile
from ModelingPipeline import MP_compile
from tensorflow_privacy import tp_compile
from tensorflow_privacy.privacy import privacy_compile
from tensorflow_privacy.privacy.analysis import analysis_compile
# from tensorflow_privacy.privacy.bolt_on import bolt_compile
from tensorflow_privacy.privacy.dp_query import dp_compile
from tensorflow_privacy.privacy.estimators import estimators_compile
from tensorflow_privacy.privacy.estimators.v1 import v1_compile
from tensorflow_privacy.privacy.keras_models import keras_compile
from tensorflow_privacy.privacy.membership_inference_attack import MIA_compile
from tensorflow_privacy.privacy.membership_inference_attack.codelabs import codelabs_compile
from tensorflow_privacy.privacy.optimizers import optimizers_compile
from Utility import utility_compile


def execute_compile():
    # Compile the top-level modules first
    open_compile.run_compile()

    # Each package is compiled from inside its own directory
    os.chdir("./AttackVectors")
    AV_compile.run_compile()

    os.chdir("../DataDiagnostic")
    DD_compile.run_compile()

    os.chdir("../Deepxplore")
    DE_compile.run_compile()

    os.chdir("../MetamorphicTesting/helper_functions")
    MT_compile.run_compile()

    os.chdir("../")
    os.chdir("../ModelExplainability")
    ME_compile.run_compile()

    os.chdir("../ModelingPipeline")
    MP_compile.run_compile()

    os.chdir("../Utility")
    utility_compile.run_compile()


if __name__ == '__main__':
    execute_compile()

This script drives the compilation of the project's modules and packages related to machine learning model
evaluation and privacy analysis. After compiling the top-level modules, it changes into each package
directory in turn (AttackVectors, DataDiagnostic, Deepxplore, MetamorphicTesting, ModelExplainability,
ModelingPipeline, and Utility) and invokes that package's `run_compile()` function.
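
The individual `*_compile` modules are not shown in the source; as a hypothetical sketch, each one presumably wraps a Cython `setup()` call like the earlier setup script and exposes it through `run_compile()`. The module names below are assumptions.

# Hypothetical sketch of one per-package compile module (e.g. AV_compile.py),
# following the Cython setup pattern shown earlier; the module list is a
# placeholder and may not match the real package contents.
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext


def run_compile():
    ext_modules = [
        Extension("art_evasion_blackbox", ["art_evasion_blackbox.py"]),
        Extension("art_evasion_whitebox", ["art_evasion_whitebox.py"]),
    ]
    # script_args replaces command-line arguments, so calling run_compile()
    # behaves like "python setup.py build_ext --inplace".
    setup(name="AttackVectors",
          cmdclass={'build_ext': build_ext},
          ext_modules=ext_modules,
          script_args=["build_ext", "--inplace"])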
