# ML Lab Test 1 — collected machine-learning lab programs
# Program 1 — binary classifier (MLP) on an 8-feature dataset.
# NOTE(review): `dataset`, `Sequential`, and `Dense` are provided by earlier
# lines of the full lab sheet (numpy loadtxt + keras imports) — confirm there.
X = dataset[:, 0:8]   # first 8 columns: input features
y = dataset[:, 8]     # 9th column: binary label

model = Sequential()
# input_dim=8 matches the 8 feature columns sliced above.
model.add(Dense(8, input_dim=8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))

# BUG FIX: the original evaluated an uncompiled, untrained model, which Keras
# rejects at runtime — compile and fit before evaluating.
model.compile(loss='binary_crossentropy', optimizer='adam',
              metrics=['accuracy'])
model.fit(X, y, epochs=150, batch_size=10, verbose=0)

_, accuracy = model.evaluate(X, y)
# Program 2 — autoencoder reconstruction on MNIST
import numpy as np

# 784 = 28*28 flattened MNIST pixels.
input_img = Input(shape=(784,))

# NOTE(review): the encoder/decoder layers and the `autoencoder` Model are
# built elsewhere in the full lab sheet — this fragment only compiles and
# runs the finished model.
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')

# BUG FIX: `autoencoder.predict` returns the *reconstructed* (decoded)
# images, not the latent encodings; the original misnamed the result
# `encoded_imgs` and then aliased it to `decoded_imgs`. Name it directly.
decoded_imgs = autoencoder.predict(x_test)
# Visualize originals (top row) vs. reconstructions (bottom row).
# BUG FIX: the loop body had lost all indentation (SyntaxError); structure
# restored. NOTE(review): `n` (number of digits shown) is set elsewhere in
# the full lab sheet — confirm (typically n = 10).
plt.figure(figsize=(20, 4))
for i in range(n):
    # Top row: original test images.
    ax = plt.subplot(2, n, i + 1)
    plt.imshow(x_test[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)

    # Bottom row: reconstructed images.
    ax = plt.subplot(2, n, i + 1 + n)
    plt.imshow(decoded_imgs[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()
# Program 3 — CNN digit classifier (Keras Sequential, MNIST)
import numpy as np

num_classes = 10   # MNIST digits 0-9
batch_size = 128
epochs = 15

# Load the data and split it between train and test sets
# NOTE(review): the keras.datasets.mnist loading/normalizing code and
# `input_shape` (typically (28, 28, 1)) come from earlier in the full lab
# sheet — confirm there.

# BUG FIX: `keras.Sequential` takes a *list* of layers and the original call
# was never closed (SyntaxError).
model = keras.Sequential(
    [
        keras.Input(shape=input_shape),
        # NOTE(review): the canonical Keras MNIST example has a
        # Conv2D(32, (3, 3), activation="relu") layer here before the first
        # pooling layer — confirm against the full lab sheet.
        layers.MaxPooling2D(pool_size=(2, 2)),
        layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
        layers.MaxPooling2D(pool_size=(2, 2)),
        layers.Flatten(),
        layers.Dropout(0.5),  # regularization before the classifier head
        layers.Dense(num_classes, activation="softmax"),
    ]
)

model.summary()
# Program 5 — Word2Vec embeddings with gensim (CBOW vs. Skip-gram)
import warnings

# Silence gensim's noisy deprecation warnings for the lab demo.
warnings.filterwarnings(action='ignore')

import gensim

# NOTE(review): `sample` is a file handle (e.g. open("alice.txt")) and
# word_tokenize / sent_tokenize come from nltk.tokenize — both are set up
# earlier in the full lab sheet; confirm there.
s = sample.read()

# Build the training corpus: one lowercased token list per sentence.
# BUG FIX: the original dropped the outer sentence loop, leaving `i` unbound
# and appending a single `temp` list once.
data = []
for i in sent_tokenize(s):
    temp = []
    for j in word_tokenize(i):
        temp.append(j.lower())
    data.append(temp)

# CBOW model (sg=0 is the gensim default).
model1 = gensim.models.Word2Vec(data, min_count=1,
                                vector_size=100, window=5)

# Print results
# BUG FIX: the similarity calls had stray closing parens — they were
# clearly wrapped in print() in the original lab sheet.
print(model1.wv.similarity('alice', 'wonderland'))
print(model1.wv.similarity('alice', 'machines'))

# Skip-gram model — the dangling "window = 5, sg = 1)" line was the tail of
# this constructor call.
model2 = gensim.models.Word2Vec(data, min_count=1, vector_size=100,
                                window=5, sg=1)

# Print results
print(model2.wv.similarity('alice', 'wonderland'))
print(model2.wv.similarity('alice', 'machines'))