DL
ex 1
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

X = np.array([[0.], [1.], [2.], [3.]])  # toy data; X and y were missing from the listing
y = np.array([0., 0., 1., 1.])
model = Sequential()
model.add(Dense(1, activation='sigmoid', input_shape=(1,)))  # single sigmoid neuron
model.compile(optimizer='adam', loss='binary_crossentropy')
model.fit(X, y, epochs=200, verbose=0)
predictions = model.predict(X)
print("Predictions:")
for i in range(len(predictions)):
    print(f"x={X[i][0]} -> p={predictions[i][0]:.3f}")
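Since a single sigmoid unit is just logistic regression, the fitted weight and bias can be read off the layer directly; a minimal sketch, reusing the model above:

w, b = model.layers[0].get_weights()
print(f"learned weight={w[0][0]:.3f}, bias={b[0]:.3f}")  # p = sigmoid(w*x + b)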
ex 2
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
import matplotlib.pyplot as plt

(train_images, train_labels), (test_images, test_labels) = datasets.mnist.load_data()
train_images = train_images.reshape((60000, 28, 28, 1)).astype('float32') / 255
test_images = test_images.reshape((10000, 28, 28, 1)).astype('float32') / 255
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))
# Labels are integer class IDs here, so the sparse loss is needed
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
history = model.fit(train_images, train_labels, epochs=5, batch_size=64, validation_split=0.2)
plt.plot(history.history['accuracy'], label='train')
plt.plot(history.history['val_accuracy'], label='validation')
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend()
plt.show()
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='validation')
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend()
plt.show()
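A quick check on the held-out test set rounds out the exercise; this evaluate call is a sketch, not part of the original listing:

test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=0)
print(f"Test accuracy: {test_acc:.4f}")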
ex 3
import tensorflow as tf
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense

num_classes = 10
# The Conv2D layers before each pooling step were missing from the listing;
# a standard Conv/Pool stack and a 32x32 RGB input shape are assumed here.
model = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)),
    MaxPooling2D((2, 2)),
    Conv2D(64, (3, 3), activation='relu'),
    MaxPooling2D((2, 2)),
    Conv2D(128, (3, 3), activation='relu'),
    MaxPooling2D((2, 2)),
    Flatten(),
    Dense(128, activation='relu'),
    Dense(num_classes, activation='softmax')
])
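Compiling and printing a summary verifies how each Conv/Pool stage shrinks the feature maps; the optimizer and loss here are assumptions, since the original stops at the model definition:

model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.summary()  # shows the output shape after every layer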
ex 4
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, SimpleRNN, Dense

corpus = [
    "hello other",
    "hello there",
    "world is beautiful",
]
tokenizer = Tokenizer()
tokenizer.fit_on_texts(corpus)
total_words = len(tokenizer.word_index) + 1

# Build n-gram prefixes of each line as training sequences
input_sequences = []
for line in corpus:
    token_list = tokenizer.texts_to_sequences([line])[0]
    for i in range(1, len(token_list)):
        n_gram_sequence = token_list[:i + 1]
        input_sequences.append(n_gram_sequence)

max_len = max(len(seq) for seq in input_sequences)
input_sequences = pad_sequences(input_sequences, maxlen=max_len, padding='pre')
X, y = input_sequences[:, :-1], input_sequences[:, -1]
y = tf.keras.utils.to_categorical(y, num_classes=total_words)

model = Sequential()
model.add(Embedding(total_words, 10, input_length=max_len - 1))  # embedding size assumed
model.add(SimpleRNN(50))
model.add(Dense(total_words, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(X, y, epochs=200, verbose=0)

def predict_next_word(seed_text):
    token_list = tokenizer.texts_to_sequences([seed_text])[0]
    token_list = pad_sequences([token_list], maxlen=max_len - 1, padding='pre')
    predicted_word_index = int(np.argmax(model.predict(token_list, verbose=0)))
    predicted_word = tokenizer.index_word[predicted_word_index]
    return predicted_word

seed_text = "hello"
next_word = predict_next_word(seed_text)
print(f"'{seed_text}' -> '{next_word}'")
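To generate a short phrase rather than a single word, the prediction can be fed back into the seed text; a small sketch using predict_next_word from above:

text = seed_text
for _ in range(3):  # generate three more words
    text += ' ' + predict_next_word(text)
print(text)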
ex 5
import numpy as np
import tensorflow as tf
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, LSTM, Dense

vocab_size, embedding_dim, max_length = 10000, 128, 100
lstm_units, batch_size, epochs = 64, 32, 5

(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=vocab_size)
x_train = pad_sequences(x_train, maxlen=max_length)
x_test = pad_sequences(x_test, maxlen=max_length)

model = Sequential([
    Embedding(vocab_size, embedding_dim, input_length=max_length),
    LSTM(lstm_units),
    Dense(1, activation='sigmoid')
])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
          validation_data=(x_test, y_test))

# Encode new text with the IMDB word index (load_data shifts indices by 3),
# not a freshly fitted Tokenizer, whose indices would not match the
# vocabulary the model was trained on.
word_index = imdb.get_word_index()

def predict_sentiment(texts):
    sequences = [[word_index.get(w, -1) + 3 for w in text.lower().split()] for text in texts]
    sequences = [[i if 0 < i < vocab_size else 2 for i in seq] for seq in sequences]  # 2 = <unk>
    sequences = pad_sequences(sequences, maxlen=max_length)
    predictions = model.predict(sequences, verbose=0)
    return ['Positive' if pred[0] > 0.5 else 'Negative' for pred in predictions]

texts = ["this movie was great", "this movie was terrible"]
print(predict_sentiment(texts))
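As a sanity check on the index handling, a padded review from x_train can be decoded back to words with the inverted IMDB index; a sketch reusing word_index from above:

index_word = {i + 3: w for w, i in word_index.items()}
index_word.update({0: '<pad>', 1: '<start>', 2: '<unk>'})
print(' '.join(index_word.get(int(i), '?') for i in x_train[0]))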
ex 6
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, models

# Tiny toy dataset; the actual sentences and tags were missing from the listing
sentences = [['the', 'dog', 'barks'], ['a', 'cat', 'sleeps']]
tags = [['DET', 'NOUN', 'VERB'], ['DET', 'NOUN', 'VERB']]

word_vocab = {word: i for i, word in enumerate(set(word for sentence in sentences for word in sentence))}
tag_vocab = {tag: i for i, tag in enumerate(set(tag for tag_list in tags for tag in tag_list))}
X = np.array([[word_vocab[word] for word in sentence] for sentence in sentences])
y = np.array([[tag_vocab[tag] for tag in tag_list] for tag_list in tags])
max_len = X.shape[1]

embedding_dim = 64
num_words = len(word_vocab)
num_tags = len(tag_vocab)

input_layer = layers.Input(shape=(max_len,))
embedding_layer = layers.Embedding(input_dim=num_words, output_dim=embedding_dim)(input_layer)
# The rest of the model was cut off in the listing; a BiLSTM tagging head is assumed
lstm_layer = layers.Bidirectional(layers.LSTM(64, return_sequences=True))(embedding_layer)
output_layer = layers.TimeDistributed(layers.Dense(num_tags, activation='softmax'))(lstm_layer)
model = models.Model(input_layer, output_layer)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(X, y, epochs=100, verbose=0)
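Mapping predictions back through tag_vocab shows the tagger's per-word output; a minimal sketch with the toy data above:

inv_tag = {i: t for t, i in tag_vocab.items()}
pred = model.predict(X[:1], verbose=0).argmax(-1)[0]
print(list(zip(sentences[0], [inv_tag[int(i)] for i in pred])))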
ex 7
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, models

# Toy parallel corpus; only the first pair survived in the listing
data = [
    ('hello', 'bonjour'),
    ('thank you', 'merci'),
    ('good night', 'bonne nuit'),
]
input_texts = [pair[0] for pair in data]
# Wrap targets in start ('\t') and end ('\n') tokens *before* tokenizing,
# so the special tokens actually appear in the target sequences
target_texts = ['\t ' + pair[1] + ' \n' for pair in data]

input_tokenizer = tf.keras.preprocessing.text.Tokenizer()
input_tokenizer.fit_on_texts(input_texts)
input_sequences = input_tokenizer.texts_to_sequences(input_texts)
input_sequences = tf.keras.preprocessing.sequence.pad_sequences(input_sequences)

target_tokenizer = tf.keras.preprocessing.text.Tokenizer(filters='')
target_tokenizer.fit_on_texts(target_texts)
target_sequences = target_tokenizer.texts_to_sequences(target_texts)
target_sequences = tf.keras.preprocessing.sequence.pad_sequences(target_sequences, padding='post')

input_vocab_size = len(input_tokenizer.word_index) + 1
target_vocab_size = len(target_tokenizer.word_index) + 1
embedding_dim = 256
latent_dim = 256

# Training model: encoder-decoder LSTM with teacher forcing
encoder_inputs = layers.Input(shape=(None,))
encoder_embedding = layers.Embedding(input_vocab_size, embedding_dim)
_, state_h, state_c = layers.LSTM(latent_dim, return_state=True)(encoder_embedding(encoder_inputs))
encoder_states = [state_h, state_c]

decoder_inputs = layers.Input(shape=(None,))
decoder_embedding = layers.Embedding(target_vocab_size, embedding_dim)
decoder_lstm = layers.LSTM(latent_dim, return_sequences=True, return_state=True)
decoder_embedded = decoder_embedding(decoder_inputs)
decoder_outputs, _, _ = decoder_lstm(decoder_embedded, initial_state=encoder_states)
decoder_dense = layers.Dense(target_vocab_size, activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)

model = models.Model([encoder_inputs, decoder_inputs], decoder_outputs)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
model.fit([input_sequences, target_sequences[:, :-1]], target_sequences[:, 1:],
          epochs=300, verbose=0)

# Inference models: encoder returns states; decoder steps one token at a time
encoder_model = models.Model(encoder_inputs, encoder_states)
decoder_state_input_h = layers.Input(shape=(latent_dim,))
decoder_state_input_c = layers.Input(shape=(latent_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_embedded = decoder_embedding(decoder_inputs)
decoder_outputs, h, c = decoder_lstm(decoder_embedded, initial_state=decoder_states_inputs)
decoder_outputs = decoder_dense(decoder_outputs)
decoder_model = models.Model([decoder_inputs] + decoder_states_inputs, [decoder_outputs, h, c])

def decode_sequence(input_seq):
    states_value = encoder_model.predict(input_seq, verbose=0)
    target_seq = np.array([[target_tokenizer.word_index['\t']]])
    decoded_sentence = ''
    max_words = 50
    for _ in range(max_words):
        output_tokens, h, c = decoder_model.predict([target_seq] + states_value, verbose=0)
        sampled_token_index = int(np.argmax(output_tokens[0, -1, :]))
        sampled_word = target_tokenizer.index_word.get(sampled_token_index, '')
        if sampled_word in ('\n', ''):  # stop at the end token
            break
        decoded_sentence += ' ' + sampled_word
        target_seq = np.array([[sampled_token_index]])
        states_value = [h, c]
    return decoded_sentence.strip()

test_sentence = 'hello'
test_sequence = input_tokenizer.texts_to_sequences([test_sentence])
test_sequence = tf.keras.preprocessing.sequence.pad_sequences(test_sequence,
                                                              maxlen=input_sequences.shape[1])
translated_sentence = decode_sequence(test_sequence)
print("Input:", test_sentence)
print("Translated:", translated_sentence)
ex 8
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras.preprocessing.image import load_img, img_to_array
# VGG16 is assumed here; the original listing never names its base model
from tensorflow.keras.applications.vgg16 import VGG16, preprocess_input
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Flatten, Dense, Dropout, RandomFlip,
                                     RandomTranslation, RandomRotation, RandomZoom)

def load_image(path, target_size=(224, 224)):
    img = load_img(path, target_size=target_size)
    img_array = img_to_array(img)
    return img_array

def show_images(images, title):
    fig, axes = plt.subplots(1, len(images), figsize=(4 * len(images), 4))
    if len(images) == 1:
        axes = [axes]
    for ax, img in zip(axes, images):
        ax.imshow(img / 255)
        ax.axis('off')
    plt.suptitle(title)
    plt.show()

x_train = []
y_train = []
x_test = []
y_test = []
arctic_fox_paths = ['/1.jpg']
walrus_paths = ['/3.jpg']
polar_bear_paths = ['/2.png']

# Load each class; with one image per class the same image serves as
# train and test data (a toy setup)
for label, paths in enumerate([arctic_fox_paths, walrus_paths, polar_bear_paths]):
    for path in paths:
        img = load_image(path)
        x_train.append(img)
        y_train.append(label)
        x_test.append(img)
        y_test.append(label)

x_train = preprocess_input(np.array(x_train))
x_test = preprocess_input(np.array(x_test))
y_train = np.array(y_train)
y_test = np.array(y_test)

base_model = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
base_model.trainable = False  # freeze the pretrained convolutional base

model = Sequential([
    RandomFlip('horizontal'),
    RandomTranslation(0.2, 0.2),
    RandomRotation(0.2),
    RandomZoom(0.2),
    base_model,
    Flatten(),
    Dense(256, activation='relu'),
    Dropout(0.5),
    Dense(3, activation='softmax')
])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5)

augmentation_model = Sequential([
    RandomFlip('horizontal'),
    RandomTranslation(0.2, 0.2),
    RandomRotation(0.2),
    RandomZoom(0.2)
])

augmented_images = []
for img in x_train:
    # training=True is required for the random layers to actually augment
    augmented_image = augmentation_model(np.expand_dims(img, 0), training=True)
    augmented_images.append(augmented_image.numpy()[0])
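The show_images helper defined above can display the augmented batch; note the colors will look shifted because x_train was already VGG-preprocessed:

show_images(augmented_images, 'Augmented training images')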