
ASSIGNMENT-8

Problem:
a) Solve the attached example without using predefined functions

CODE:
class NaiveBayesClassifier:
    def __init__(self):
        self.priors = {}
        self.posteriors = {}

    def fit(self, X, y):
        n_samples = len(X)
        n_features = len(X[0])
        # Prior P(label) = (# samples with label) / (# samples)
        self.priors = {label: y.count(label) / n_samples for label in set(y)}
        self.posteriors = {}
        for label in self.priors:
            self.posteriors[label] = {}
            for feature in range(n_features):
                self.posteriors[label][feature] = {}
                for value in set([x[feature] for x in X]):
                    # Conditional P(value | label) for this feature
                    count = sum(1 for i in range(n_samples)
                                if X[i][feature] == value and y[i] == label)
                    self.posteriors[label][feature][value] = count / y.count(label)

    def predict(self, X):
        predictions = []
        for sample in X:
            max_prob = -1
            pred_label = None
            for label in self.priors:
                prob = self.priors[label]
                for feature, value in enumerate(sample):
                    if value in self.posteriors[label][feature]:
                        prob *= self.posteriors[label][feature][value]
                    else:
                        # Feature value never seen in training: use a
                        # small fallback probability instead of zero
                        prob *= 0.1
                if prob > max_prob:
                    max_prob = prob
                    pred_label = label
            predictions.append(pred_label)
        return predictions
X = [
    ['R1', 'youth', 'high', 'no', 'fair'],
    ['R2', 'youth', 'high', 'no', 'excellent'],
    ['R3', 'middle-aged', 'high', 'no', 'fair'],
    ['R4', 'senior', 'medium', 'no', 'fair'],
    ['R5', 'senior', 'low', 'yes', 'fair'],
    ['R6', 'senior', 'low', 'yes', 'excellent'],
    ['R7', 'middle-aged', 'low', 'yes', 'excellent'],
    ['R8', 'youth', 'medium', 'no', 'fair'],
    ['R9', 'youth', 'low', 'yes', 'fair'],
    ['R10', 'senior', 'medium', 'yes', 'fair'],
    ['R11', 'youth', 'medium', 'yes', 'excellent'],
    ['R12', 'middle-aged', 'medium', 'no', 'excellent'],
    ['R13', 'middle-aged', 'high', 'yes', 'fair'],
    ['R14', 'senior', 'medium', 'no', 'excellent']
]
y = ['no', 'no', 'yes', 'yes', 'yes',
     'no', 'yes', 'no', 'yes', 'yes',
     'yes', 'yes', 'yes', 'no']

model = NaiveBayesClassifier()
# Drop the record-ID column (R1..R14): it is an identifier, not a feature
model.fit([row[1:] for row in X], y)

test_data = [
    ['R15', 'youth', 'medium', 'yes', 'fair']
]
predictions = model.predict([row[1:] for row in test_data])
print("Predictions:", predictions)

OUTPUT:
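As a hand check, the priors and class-conditional probabilities of this table (it matches the well-known AllElectronics example) can be multiplied out directly for R15 = (youth, medium, student = yes, fair); a small sketch:

# P(yes) = 9/14 and P(no) = 5/14 from the labels above
p_yes = 9/14 * 2/9 * 4/9 * 6/9 * 6/9   # P(yes) * product of P(feature|yes)
p_no  = 5/14 * 3/5 * 2/5 * 1/5 * 2/5   # P(no)  * product of P(feature|no)
print(round(p_yes, 4), round(p_no, 4))  # ~0.0282 vs ~0.0069 -> 'yes' wins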

Problem:

b) Solve the attached example using predefined functions

CODE:
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import LabelEncoder

data = [
    ['R1', 'youth', 'high', 'no', 'fair'],
    ['R2', 'youth', 'high', 'no', 'excellent'],
    ['R3', 'middle-aged', 'high', 'no', 'fair'],
    ['R4', 'senior', 'medium', 'no', 'fair'],
    ['R5', 'senior', 'low', 'yes', 'fair'],
    ['R6', 'senior', 'low', 'yes', 'excellent'],
    ['R7', 'middle-aged', 'low', 'yes', 'excellent'],
    ['R8', 'youth', 'medium', 'no', 'fair'],
    ['R9', 'youth', 'low', 'yes', 'fair'],
    ['R10', 'senior', 'medium', 'yes', 'fair'],
    ['R11', 'youth', 'medium', 'yes', 'excellent'],
    ['R12', 'middle-aged', 'medium', 'no', 'excellent'],
    ['R13', 'middle-aged', 'high', 'yes', 'fair'],
    ['R14', 'senior', 'medium', 'no', 'excellent']
]
X_train = [row[1:] for row in data]   # drop the record-ID column
y_train = ['no', 'no', 'yes', 'yes', 'yes',
           'no', 'yes', 'no', 'yes', 'yes',
           'yes', 'yes', 'yes', 'no']
test_data = [
    ['R15', 'youth', 'medium', 'yes', 'fair']
]

# Encode each categorical column as integers; train and test rows are
# encoded together so they share the same mapping
combined_data = X_train + [row[1:] for row in test_data]
encoder = LabelEncoder()
combined_encoded = []
for i in range(len(combined_data[0])):
    combined_encoded.append(encoder.fit_transform([row[i] for row in combined_data]))
combined_encoded = list(zip(*combined_encoded))

model = GaussianNB()
model.fit(combined_encoded[:len(X_train)], y_train)
predictions = model.predict(combined_encoded[len(X_train):])
print("Predictions:", predictions)

OUTPUT:
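GaussianNB models each feature as a continuous Gaussian, which is a loose fit for integer-coded categories. An alternative sketch using scikit-learn's CategoricalNB, which treats each encoded column as discrete (reusing X_train, y_train, and test_data from above):

from sklearn.naive_bayes import CategoricalNB
from sklearn.preprocessing import OrdinalEncoder

# Encode train and test together so the category codes line up
enc = OrdinalEncoder()
X_all = enc.fit_transform(X_train + [row[1:] for row in test_data])
cnb = CategoricalNB()
cnb.fit(X_all[:len(X_train)], y_train)
print("Predictions:", cnb.predict(X_all[len(X_train):]))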

Problem:
c) Apply the classifier to any popular dataset using predefined functions

CODE:
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score
import pandas as pd

data = pd.read_csv('/content/Iris (1).csv')
if 'Id' in data.columns:
    data.drop(columns=['Id'], inplace=True)
X = data.drop(columns=['Species']).values
y = data['Species'].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
                                                    random_state=42)
model = GaussianNB()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("Accuracy:", accuracy_score(y_test, y_pred))

test_case = input("Enter space-separated features for test case: ").strip().split()
test_case = [float(val) for val in test_case]
test_case = [test_case]   # predict expects a 2-D array of samples
predicted_class = model.predict(test_case)[0]
print("Predicted class:", predicted_class)

OUTPUT:
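If the CSV copy is not at hand, the same flow runs on scikit-learn's built-in copy of Iris; a minimal sketch:

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score

iris = load_iris()
X_train, X_test, y_train, y_test = train_test_split(
    iris.data, iris.target, test_size=0.2, random_state=42)
model = GaussianNB().fit(X_train, y_train)
print("Accuracy:", accuracy_score(y_test, model.predict(X_test)))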
ASSIGNMENT-9
Problem:
Implement PCA and SVD without using inbuilt functions.

Apply PCA and SVD on two real-world datasets, and clearly state your observations on how PCA and SVD help the data science process.

Code:
import numpy as np

def svd(X):
    # Eigen-decompose X.T X / (n-1): its eigenvalues are the squared
    # singular values of X (up to the 1/(n-1) scaling)
    covariance_matrix = np.dot(X.T, X) / (X.shape[0] - 1)
    eigenvalues, eigenvectors = np.linalg.eig(covariance_matrix)
    sorted_indices = np.argsort(eigenvalues)[::-1]
    sorted_eigenvectors = eigenvectors[:, sorted_indices]
    # abs() guards against tiny negative eigenvalues from round-off
    # (this matrix has rank 2, so the third eigenvalue is ~0)
    singular_values = np.sqrt(np.abs(eigenvalues[sorted_indices]))
    U = np.dot(X, sorted_eigenvectors / singular_values)
    return U

print("--This is SVD implementation on 4*3 matrix--")
data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
svd_data = svd(data)
print("SVD:\n", svd_data)
--This is SVD implementation on 4*3 matrix--
SVD:
 [[-2.44005562e-01  1.42844716e+00 -5.27128577e-07]
 [-5.95732457e-01  7.38310802e-01 -2.64495611e-07]
 [-9.47459352e-01  4.81744447e-02  1.30385160e-08]
 [-1.29918625e+00 -6.41961912e-01  2.75671482e-07]]
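As a quick sanity check, the from-scratch singular values can be compared with NumPy's exact SVD; a small sketch, keeping in mind that this implementation carries a 1/(n-1) scaling and that the sign and order of U's columns are not unique:

import numpy as np

data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
s_np = np.linalg.svd(data, compute_uv=False)
# The from-scratch version takes sqrt of eigenvalues of X.T X / (n-1),
# so its singular values are NumPy's divided by sqrt(n-1)
print(s_np / np.sqrt(data.shape[0] - 1))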
1) IRIS dataset
import numpy as np
from sklearn.datasets import load_iris

def standardize_data(X):
    mean = np.mean(X, axis=0)
    std_dev = np.std(X, axis=0)
    standardized_X = (X - mean) / std_dev
    return standardized_X, mean, std_dev

def covariance_matrix(X):
    n_samples = X.shape[0]
    cov_matrix = np.dot(X.T, X) / n_samples
    return cov_matrix

def svd(X):
    standardized_X, _, _ = standardize_data(X)
    # Compute covariance matrix
    cov_matrix = covariance_matrix(standardized_X)
    # Eigen decomposition
    eigenvalues, eigenvectors = np.linalg.eig(cov_matrix)
    # Sort eigenvectors based on eigenvalues
    sorted_indices = np.argsort(eigenvalues)[::-1]
    sorted_eigenvectors = eigenvectors[:, sorted_indices]
    # Construct singular value matrix
    singular_values = np.sqrt(eigenvalues[sorted_indices])
    singular_value_matrix = np.diag(singular_values)
    # Compute U and V. Note: U = Z @ E already absorbs the singular
    # values (it equals U_true @ S), so the standardized matrix is
    # recovered as U @ V with no extra S factor
    U = standardized_X.dot(sorted_eigenvectors)
    V = sorted_eigenvectors.T
    # Reconstruct the original (standardized) matrix
    reconstructed_X = np.dot(U, V)
    return U, singular_value_matrix, V, reconstructed_X

# Load Iris dataset
iris = load_iris()
X = iris.data
# SVD
U, singular_value_matrix, V, reconstructed_data_svd = svd(X)
print("\nSVD U matrix:")
print(U)
print("\nSVD Singular Value Matrix:")
print(singular_value_matrix)
print("\nSVD V matrix:")
print(V)
print("\nSVD reconstructed data:")
print(reconstructed_data_svd)

SVD U matrix:
[[-2.26470281e+00 -4.80026597e-01 -1.27706022e-01  2.41682039e-02]
 [-2.08096115e+00  6.74133557e-01 -2.34608854e-01  1.03006775e-01]
 [-2.36422905e+00  3.41908024e-01  4.42014848e-02  2.83770534e-02]
 [-2.29938422e+00  5.97394508e-01  9.12901063e-02 -6.59555596e-02]
 [-2.38984217e+00 -6.46835383e-01  1.57381957e-02 -3.59228133e-02]
 ...
 (150 rows in total; remaining output truncated)]
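With the U @ V reconstruction above, the standardized matrix should be recovered exactly; a one-line check, assuming the variables from the Iris run above are still in scope:

import numpy as np

# reconstructed_data_svd and X come from the Iris run above
standardized_X, _, _ = standardize_data(X)
print(np.allclose(reconstructed_data_svd, standardized_X))  # expected: True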

2) wine quality dataset

import numpy as np
import pandas as pd

def standardize_data(X):
    mean = np.mean(X, axis=0)
    std_dev = np.std(X, axis=0)
    standardized_X = (X - mean) / std_dev
    return standardized_X, mean, std_dev

def covariance_matrix(X):
    n_samples = X.shape[0]
    cov_matrix = np.dot(X.T, X) / n_samples
    return cov_matrix

def svd(X):
    standardized_X, _, _ = standardize_data(X)
    # Compute covariance matrix
    cov_matrix = covariance_matrix(standardized_X)
    # Eigen decomposition
    eigenvalues, eigenvectors = np.linalg.eig(cov_matrix)
    # Sort eigenvectors based on eigenvalues
    sorted_indices = np.argsort(eigenvalues)[::-1]
    sorted_eigenvectors = eigenvectors[:, sorted_indices]
    # Construct singular value matrix
    singular_values = np.sqrt(eigenvalues[sorted_indices])
    singular_value_matrix = np.diag(singular_values)
    # Compute U and V. As in the Iris version, U = Z @ E already absorbs
    # the singular values, so the standardized matrix is U @ V
    U = standardized_X.dot(sorted_eigenvectors)
    V = sorted_eigenvectors.T
    # Reconstruct the original (standardized) matrix
    reconstructed_X = np.dot(U, V)
    return U, singular_value_matrix, V, reconstructed_X

# Load Wine Quality dataset
data_path = "/content/WineQT.csv"
wine_quality = pd.read_csv(data_path)
X = wine_quality.values
# SVD
U, singular_value_matrix, V, reconstructed_data_svd = svd(X)
print("\nSVD U matrix:")
print(U)
print("\nSVD Singular Value Matrix:")
print(singular_value_matrix)
print("\nSVD V matrix:")
print(V)
print("\nSVD reconstructed data:")
print(reconstructed_data_svd)
SVD U matrix:
[[-1.2151326   1.79455548 -1.53760979 ...  0.19578692  0.12369741
   0.01408089]
 [-0.46743535  2.4779897   0.07017969 ...  0.88718415 -0.16270453
  -0.17968821]
 [-0.40169488  1.87194873 -0.73697061 ...  0.48202598  0.06979055
  -0.16234897]
 ...
 [-2.50698935  0.46368472  1.04675403 ...  0.7372593  -0.19267716
  -0.02013498]
 [-2.48395074 -0.61722305  2.20531812 ...  0.69776756 -0.35662953
  -0.19522711]
 [-2.611506    0.55873545  1.1233558  ...  0.36613367 -0.59729112
   0.08976279]]
SVD Singular Value Matrix (13x13 diagonal; diagonal entries shown):
[1.80969666 1.51370316 1.30464529 1.10278853 0.98400443 0.96229396
 0.79217884 0.74718617 0.6987322  0.63809232 0.55735222 ...]
(remaining output truncated)
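The singular values make the variance structure explicit: squaring them and normalizing gives the share of total variance each component carries. A short sketch, reusing singular_value_matrix from the run above:

import numpy as np

# Share of total variance carried by each component
s = np.diag(singular_value_matrix)
energy = s**2 / np.sum(s**2)
print(np.round(energy, 3))
print("Top 3 components:", round(float(energy[:3].sum()) * 100, 1), "%")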
PCA Implementation

PCA without using inbuilt functions


import numpy as np

def pca(X, n_components):
    # Center the data
    X_meaned = X - np.mean(X, axis=0)
    covariance_matrix = np.cov(X_meaned, rowvar=False)
    # eigh suits the symmetric covariance matrix
    eigenvalues, eigenvectors = np.linalg.eigh(covariance_matrix)
    # Sort eigenvectors by decreasing eigenvalue
    sorted_indices = np.argsort(eigenvalues)[::-1]
    sorted_eigenvalues = eigenvalues[sorted_indices]
    sorted_eigenvectors = eigenvectors[:, sorted_indices]
    # Keep the top n_components directions and project onto them
    components = sorted_eigenvectors[:, :n_components]
    projected_data = np.dot(X_meaned, components)
    return projected_data

print("--This is PCA implementation on 4*3 matrix--")
data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
n_components = 1
pca_data = pca(data, n_components)
print("PCA:\n", pca_data)
--This is PCA implementation on 4*3 matrix--
PCA:
[[-7.79422863]
[-2.59807621]
[ 2.59807621]
[ 7.79422863]]
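The same projection can be cross-checked against scikit-learn's PCA; a minimal sketch, noting that the sign of a principal component is arbitrary and may come out flipped:

import numpy as np
from sklearn.decomposition import PCA

data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
sk_scores = PCA(n_components=1).fit_transform(data)
print(sk_scores)  # same magnitudes as above, possibly with flipped sign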

1) IRIS dataset
import numpy as np
from sklearn.datasets import load_iris

def standardize_data(X):
    mean = np.mean(X, axis=0)
    std_dev = np.std(X, axis=0)
    standardized_X = (X - mean) / std_dev
    return standardized_X, mean, std_dev

def covariance_matrix(X):
    n_samples = X.shape[0]
    cov_matrix = np.dot(X.T, X) / n_samples
    return cov_matrix

def pca(X, num_components):
    standardized_X, _, _ = standardize_data(X)
    cov_matrix = covariance_matrix(standardized_X)
    # Eigen decomposition
    eigenvalues, eigenvectors = np.linalg.eig(cov_matrix)
    # Sort eigenvectors based on eigenvalues
    sorted_indices = np.argsort(eigenvalues)[::-1]
    sorted_eigenvectors = eigenvectors[:, sorted_indices]
    # Select top k eigenvectors
    principal_components = sorted_eigenvectors[:, :num_components]
    # Transform data
    transformed_data = np.dot(standardized_X, principal_components)
    return transformed_data

# Load Iris dataset
iris = load_iris()
X = iris.data
# PCA
num_components = 3
transformed_data_pca = pca(X, num_components)
print("PCA transformed data:")
print(transformed_data_pca)
PCA transformed data:
[[-2.26470281 -0.4800266  -0.12770602]
 [-2.08096115  0.67413356 -0.23460885]
 [-2.36422905  0.34190802  0.04420148]
 [-2.29938422  0.59739451  0.09129011]
 [-2.38984217 -0.64683538  0.0157382 ]
 ...
 (150 rows in total; remaining output truncated)]
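One concrete way PCA helps here is visualization: the 4-D Iris data can be plotted in the plane of its first two components, where the three species separate visibly. A small sketch, assuming transformed_data_pca and iris from the run above:

import matplotlib.pyplot as plt

# First two principal components, coloured by species label
plt.scatter(transformed_data_pca[:, 0], transformed_data_pca[:, 1],
            c=iris.target, cmap='viridis')
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.title('Iris projected onto its first two principal components')
plt.show()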

2) wine quality dataset


import numpy as np
import pandas as pd

def standardize_data(X):
    mean = np.mean(X, axis=0)
    std_dev = np.std(X, axis=0)
    standardized_X = (X - mean) / std_dev
    return standardized_X, mean, std_dev

def covariance_matrix(X):
    n_samples = X.shape[0]
    cov_matrix = np.dot(X.T, X) / n_samples
    return cov_matrix

def pca(X, num_components):
    standardized_X, _, _ = standardize_data(X)
    cov_matrix = covariance_matrix(standardized_X)
    # Eigen decomposition
    eigenvalues, eigenvectors = np.linalg.eig(cov_matrix)
    # Sort eigenvectors based on eigenvalues
    sorted_indices = np.argsort(eigenvalues)[::-1]
    sorted_eigenvectors = eigenvectors[:, sorted_indices]
    # Select top k eigenvectors
    principal_components = sorted_eigenvectors[:, :num_components]
    # Transform data
    transformed_data = np.dot(standardized_X, principal_components)
    return transformed_data

# Load Wine Quality dataset
data_path = "/content/WineQT.csv"
wine_quality = pd.read_csv(data_path)
X = wine_quality.values
# PCA
num_components = 3
transformed_data_pca = pca(X, num_components)
print("PCA transformed data:")
print(transformed_data_pca)
PCA transformed data:
[[-1.2151326   1.79455548 -1.53760979]
 [-0.46743535  2.4779897   0.07017969]
 [-0.40169488  1.87194873 -0.73697061]
 ...
 [-2.50698935  0.46368472  1.04675403]
 [-2.48395074 -0.61722305  2.20531812]
 [-2.611506    0.55873545  1.1233558 ]]
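To back the "observations" part of the problem with numbers, the eigenvalue spectrum shows how much of the wine data's variance each component explains. A sketch, reusing standardize_data, covariance_matrix, and X from the run above:

import numpy as np

standardized_X, _, _ = standardize_data(X)
# eigvalsh returns the eigenvalues of the symmetric covariance matrix
# in ascending order; reverse for largest-first
eigenvalues = np.linalg.eigvalsh(covariance_matrix(standardized_X))[::-1]
ratios = eigenvalues / eigenvalues.sum()
print(np.round(ratios, 3))
# If the first few ratios dominate, most of the variance lives in a
# low-dimensional subspace: that is what makes PCA/SVD useful for
# compression, noise reduction, and visualization in the DS pipeline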
