
Ex 1: Learning XOR with a multilayer perceptron

import numpy as np
from keras.models import Sequential
from keras.layers import Dense

# XOR truth table: inputs and expected outputs
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])

# One hidden ReLU layer is enough to separate XOR; the sigmoid output
# gives a probability for the positive class.
model = Sequential()
model.add(Dense(8, input_dim=2, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

model.fit(X, y, epochs=500, verbose=0)

loss, accuracy = model.evaluate(X, y)
print(f"Loss: {loss:.4f}, Accuracy: {accuracy*100:.2f}%")

predictions = model.predict(X)
print("Predictions:")
for i in range(len(predictions)):
    print(f"Input: {X[i]}, Prediction: {predictions[i][0]:.4f}")

Ex 2: MNIST digit classification with a CNN

import tensorflow as tf
from tensorflow.keras import datasets, layers, models
import matplotlib.pyplot as plt

# Load MNIST, add a channel axis, and scale pixels to [0, 1]
(train_images, train_labels), (test_images, test_labels) = datasets.mnist.load_data()
train_images = train_images.reshape((60000, 28, 28, 1)).astype('float32') / 255
test_images = test_images.reshape((10000, 28, 28, 1)).astype('float32') / 255

# One-hot encode the digit labels for categorical cross-entropy
train_labels = tf.keras.utils.to_categorical(train_labels)
test_labels = tf.keras.utils.to_categorical(test_labels)

# Stacked Conv/MaxPool blocks followed by a small dense classifier
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))

model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(train_images, train_labels, epochs=5, batch_size=64, validation_split=0.2)

test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print(f'\nTest accuracy: {test_acc}')

# Training curves for the train and validation splits
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Validation'], loc='upper left')
plt.show()

plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Validation'], loc='upper right')
plt.show()
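
To sanity-check a single prediction, one can take the argmax of the softmax output. A short sketch assuming the trained model above (numpy is not imported in the exercise itself):

import numpy as np

probs = model.predict(test_images[:1])
print("Predicted digit:", np.argmax(probs[0]), "| true digit:", np.argmax(test_labels[0]))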

Ex 3: A deeper CNN on placeholder image data

import tensorflow as tf
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
from tensorflow.keras.models import Sequential
import numpy as np

num_classes = 10

# Three Conv/MaxPool blocks for 128x128 RGB inputs
model = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=(128, 128, 3)),
    MaxPooling2D((2, 2)),
    Conv2D(64, (3, 3), activation='relu'),
    MaxPooling2D((2, 2)),
    Conv2D(128, (3, 3), activation='relu'),
    MaxPooling2D((2, 2)),
    Flatten(),
    Dense(128, activation='relu'),
    Dense(num_classes, activation='softmax')
])

model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# Random arrays stand in for a real dataset, so the reported accuracy is
# meaningless; the point is to exercise the training pipeline end to end.
train_images = np.random.rand(100, 128, 128, 3)
train_labels = tf.keras.utils.to_categorical(np.random.randint(0, num_classes, size=(100,)),
                                             num_classes=num_classes)
model.fit(train_images, train_labels, epochs=10, validation_split=0.2)

test_images = np.random.rand(20, 128, 128, 3)
test_labels = tf.keras.utils.to_categorical(np.random.randint(0, num_classes, size=(20,)),
                                            num_classes=num_classes)

test_loss, test_acc = model.evaluate(test_images, test_labels)
print(f'Test accuracy: {test_acc}')
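
To train on real images instead of random arrays, one option is tf.keras.utils.image_dataset_from_directory. The sketch below assumes a hypothetical data/train directory with one subfolder per class; pixel values arrive in [0, 255], so a Rescaling layer would be needed if the model expects [0, 1]:

train_ds = tf.keras.utils.image_dataset_from_directory(
    'data/train',              # hypothetical path, one subfolder per class
    image_size=(128, 128),
    label_mode='categorical',  # matches the categorical_crossentropy loss
    batch_size=32)
model.fit(train_ds, epochs=10)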

Ex 4: Next-word prediction with a SimpleRNN

import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, SimpleRNN, Dense
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences

corpus = [
    "hello other",
    "hello there",
    "hello from the other side",
    "world is beautiful",
    "the other side of the world"
]

tokenizer = Tokenizer()
tokenizer.fit_on_texts(corpus)
total_words = len(tokenizer.word_index) + 1

# Build n-gram prefixes of every line; the last token of each prefix
# becomes the prediction target.
input_sequences = []
for line in corpus:
    token_list = tokenizer.texts_to_sequences([line])[0]
    for i in range(1, len(token_list)):
        n_gram_sequence = token_list[:i + 1]
        input_sequences.append(n_gram_sequence)

max_sequence_length = max(len(x) for x in input_sequences)
input_sequences = pad_sequences(input_sequences, maxlen=max_sequence_length, padding='pre')

# Split each padded sequence into inputs (all but the last token) and target (the last token)
X, y = input_sequences[:, :-1], input_sequences[:, -1]
y = tf.keras.utils.to_categorical(y, num_classes=total_words)

model = Sequential()
model.add(Embedding(total_words, 10, input_length=X.shape[1]))
model.add(SimpleRNN(50))
model.add(Dense(total_words, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(X, y, epochs=10, verbose=1)

def predict_next_word(seed_text):
    token_list = tokenizer.texts_to_sequences([seed_text])[0]
    token_list = pad_sequences([token_list], maxlen=max_sequence_length - 1, padding='pre')
    predicted_probs = model.predict(token_list, verbose=0)
    predicted_word_index = np.argmax(predicted_probs, axis=1)[0]
    predicted_word = tokenizer.index_word[predicted_word_index]
    return predicted_word

seed_text = "hello"
next_word = predict_next_word(seed_text)
print(f"Next word after '{seed_text}' is '{next_word}'")

Ex 5: IMDB sentiment classification with an LSTM

import numpy as np
import tensorflow as tf
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, LSTM, Dense

vocab_size = 10000
embedding_dim = 128
max_length = 100
lstm_units = 64
batch_size = 32
epochs = 5

# Reviews arrive pre-encoded as integer word indices; truncate/pad to max_length
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=vocab_size)
x_train = pad_sequences(x_train, maxlen=max_length)
x_test = pad_sequences(x_test, maxlen=max_length)

model = Sequential([
    Embedding(input_dim=vocab_size, output_dim=embedding_dim, input_length=max_length),
    LSTM(lstm_units),
    Dense(1, activation='sigmoid')
])

model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size, validation_split=0.2, verbose=1)

loss, accuracy = model.evaluate(x_test, y_test, verbose=1)
print(f'Test Loss: {loss:.4f}, Test Accuracy: {accuracy:.4f}')

def predict_sentiment(texts):
    # New text must be encoded with the *same* vocabulary the model was
    # trained on; fitting a fresh Tokenizer on the input texts would produce
    # unrelated indices. imdb.load_data offsets every word index by 3
    # (0 = padding, 1 = start, 2 = out-of-vocabulary).
    word_index = imdb.get_word_index()
    sequences = []
    for text in texts:
        words = text.lower().translate(str.maketrans('', '', '!.,?')).split()
        encoded = [word_index.get(word, vocab_size) + 3 for word in words]
        sequences.append([i if i < vocab_size else 2 for i in encoded])
    padded_sequences = pad_sequences(sequences, maxlen=max_length)
    predictions = model.predict(padded_sequences)
    return ['Positive' if pred[0] > 0.5 else 'Negative' for pred in predictions]

texts = ["I love this dance!", "This was terrible."]
print(predict_sentiment(texts))
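
Going the other way, an encoded review can be decoded back to words with the inverse index. A sketch assuming the same offset-by-3 convention:

inv_index = {i + 3: w for w, i in imdb.get_word_index().items()}
# Skip the padding (0), start (1) and out-of-vocabulary (2) markers
print(' '.join(inv_index.get(i, '?') for i in x_train[0] if i > 2))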

Ex 6: Part-of-speech tagging with an LSTM

import numpy as np
from sklearn.model_selection import train_test_split
from tensorflow.keras.preprocessing.sequence import pad_sequences
import tensorflow as tf
from tensorflow.keras import layers, models

sentences = [
    ["I", "love", "coding"],
    ["Dogs", "are", "great"],
    ["Cats", "are", "cute"]
]
tags = [
    ["PRON", "VERB", "NOUN"],
    ["NOUN", "VERB", "ADJ"],
    ["NOUN", "VERB", "ADJ"]
]

# Map each distinct word and tag to an integer id
word_vocab = {word: i for i, word in enumerate(set(word for sentence in sentences for word in sentence))}
tag_vocab = {tag: i for i, tag in enumerate(set(tag for tag_list in tags for tag in tag_list))}

X = [[word_vocab[word] for word in sentence] for sentence in sentences]
y = [[tag_vocab[tag] for tag in tag_list] for tag_list in tags]

max_len = max(len(seq) for seq in X)
X = pad_sequences(X, maxlen=max_len, padding='post')
y = pad_sequences(y, maxlen=max_len, padding='post')

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

embedding_dim = 64
num_words = len(word_vocab)
num_tags = len(tag_vocab)

# Sequence-to-sequence tagger: one softmax over tags per time step
input_layer = layers.Input(shape=(max_len,))
embedding_layer = layers.Embedding(input_dim=num_words, output_dim=embedding_dim)(input_layer)
lstm_layer = layers.LSTM(64, return_sequences=True)(embedding_layer)
output_layer = layers.Dense(num_tags, activation='softmax')(lstm_layer)

model = models.Model(inputs=input_layer, outputs=output_layer)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])

# sparse_categorical_crossentropy expects one integer label per time step
y_train = np.expand_dims(y_train, -1)
model.fit(X_train, y_train, batch_size=2, epochs=10)

y_test = np.expand_dims(y_test, -1)
loss, accuracy = model.evaluate(X_test, y_test)
print(f'Test Loss: {loss}, Test Accuracy: {accuracy}')
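
To see predicted tags as strings rather than integer ids, a short sketch (assuming the trained model above) inverts tag_vocab and takes the argmax at each time step:

inv_tag_vocab = {i: t for t, i in tag_vocab.items()}
pred = model.predict(X_test)            # shape: (samples, max_len, num_tags)
for seq in np.argmax(pred, axis=-1):
    print([inv_tag_vocab[i] for i in seq])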

Ex 7: English-to-French translation with an encoder-decoder LSTM

import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.models import Model

data = [
    ('hello', 'bonjour'),
    ('how are you?', 'comment ça va?'),
    ('good morning', 'bonjour'),
    ('good night', 'bonne nuit'),
    ('thank you', 'merci')
]

input_texts = [pair[0] for pair in data]
# '\t' marks the start of a target sentence and '\n' its end; surrounding
# them with spaces lets the tokenizer index them as tokens of their own.
target_texts = ['\t ' + pair[1] + ' \n' for pair in data]

input_tokenizer = tf.keras.preprocessing.text.Tokenizer()
input_tokenizer.fit_on_texts(input_texts)
input_sequences = input_tokenizer.texts_to_sequences(input_texts)
input_sequences = tf.keras.preprocessing.sequence.pad_sequences(input_sequences)

target_tokenizer = tf.keras.preprocessing.text.Tokenizer(filters='')
target_tokenizer.fit_on_texts(target_texts)
target_sequences = target_tokenizer.texts_to_sequences(target_texts)
target_sequences = tf.keras.preprocessing.sequence.pad_sequences(target_sequences)

input_vocab_size = len(input_tokenizer.word_index) + 1
target_vocab_size = len(target_tokenizer.word_index) + 1
embedding_dim = 256
latent_dim = 256

# Encoder: the final LSTM states summarize the source sentence
encoder_inputs = layers.Input(shape=(None,))
encoder_embedding = layers.Embedding(input_vocab_size, embedding_dim)(encoder_inputs)
encoder_lstm = layers.LSTM(latent_dim, return_state=True)
encoder_outputs, state_h, state_c = encoder_lstm(encoder_embedding)
encoder_states = [state_h, state_c]

# Decoder: initialized with the encoder states, trained with teacher forcing
decoder_inputs = layers.Input(shape=(None,))
decoder_embedding = layers.Embedding(target_vocab_size, embedding_dim)
decoder_embedded = decoder_embedding(decoder_inputs)
decoder_lstm = layers.LSTM(latent_dim, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(decoder_embedded, initial_state=encoder_states)
decoder_dense = layers.Dense(target_vocab_size, activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)

# Shift the targets: the decoder sees tokens 0..n-1 and predicts tokens 1..n
target_data = target_sequences[:, 1:]
decoder_input_data = target_sequences[:, :-1]

model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit([input_sequences, decoder_input_data], target_data, batch_size=32, epochs=10)

# Separate inference models: the encoder produces the initial states, and the
# decoder is stepped one token at a time, feeding its states back in.
encoder_model = Model(encoder_inputs, encoder_states)

decoder_state_input_h = layers.Input(shape=(latent_dim,))
decoder_state_input_c = layers.Input(shape=(latent_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_embedded = decoder_embedding(decoder_inputs)
decoder_outputs, state_h, state_c = decoder_lstm(decoder_embedded,
                                                 initial_state=decoder_states_inputs)
decoder_states = [state_h, state_c]
decoder_outputs = decoder_dense(decoder_outputs)
decoder_model = Model([decoder_inputs] + decoder_states_inputs,
                      [decoder_outputs] + decoder_states)

def decode_sequence(input_seq):
    states_value = encoder_model.predict(input_seq, verbose=0)
    target_seq = np.array([[target_tokenizer.word_index['\t']]])  # start token
    decoded_sentence = ''
    used_words = set()  # crude repetition guard for this tiny model
    max_words = 50
    for _ in range(max_words):
        output_tokens, h, c = decoder_model.predict([target_seq] + states_value, verbose=0)
        sampled_token_index = np.argmax(output_tokens[0, -1, :])
        sampled_char = target_tokenizer.index_word.get(sampled_token_index, '')
        if sampled_char == '\n':
            break
        if sampled_char and sampled_char not in used_words:
            decoded_sentence += ' ' + sampled_char.strip()
            used_words.add(sampled_char)
        target_seq = np.array([[sampled_token_index]])
        states_value = [h, c]
    return decoded_sentence.strip()

test_sentence = "thank you"
test_sequence = input_tokenizer.texts_to_sequences([test_sentence])
test_sequence = tf.keras.preprocessing.sequence.pad_sequences(test_sequence,
                                                              maxlen=input_sequences.shape[1])
translated_sentence = decode_sequence(test_sequence)
print("Input:", test_sentence)
print("Translated:", translated_sentence)

Ex 8: Transfer learning with ResNet50V2 and data augmentation

import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.utils import to_categorical, img_to_array, load_img
from tensorflow.keras.applications.resnet_v2 import preprocess_input  # V2 preprocessing for ResNet50V2
from tensorflow.keras.applications import ResNet50V2
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Flatten, Dense, Dropout
from tensorflow.keras.layers import RandomFlip, RandomRotation, RandomTranslation, RandomZoom
import tensorflow as tf

def load_image(file_path, label):
    img = load_img(file_path, target_size=(224, 224))
    img_array = img_to_array(img)
    return img_array, label

def show_images(images, title):
    # Expects raw pixel values in [0, 255]
    fig, axes = plt.subplots(1, len(images), figsize=(15, 5))
    if len(images) == 1:
        axes = [axes]
    for ax, img in zip(axes, images):
        ax.imshow(img / 255)
        ax.axis('off')
    plt.suptitle(title)
    plt.show()

x_train = []
y_train = []
x_test = []
y_test = []

# Class 0 = arctic fox, 1 = walrus, 2 = polar bear
arctic_fox_paths = ['/1.jpg']
walrus_paths = ['/3.jpg']
polar_bear_paths = ['/2.png']

for path in arctic_fox_paths:
    img, label = load_image(path, 0)
    x_train.append(img)
    y_train.append(label)

for path in walrus_paths:
    img, label = load_image(path, 1)
    x_train.append(img)
    y_train.append(label)

for path in polar_bear_paths:
    img, label = load_image(path, 2)
    x_test.append(img)
    y_test.append(label)

# Keep the raw arrays for display; preprocess copies for the network
x_train_raw = np.array(x_train)
x_test_raw = np.array(x_test)
x_train = preprocess_input(x_train_raw.copy())
x_test = preprocess_input(x_test_raw.copy())

y_train_encoded = to_categorical(y_train, num_classes=3)
y_test_encoded = to_categorical(y_test, num_classes=3)

show_images(x_train_raw, "Original Training Images")
show_images(x_test_raw, "Original Testing Images")

# Freeze the pretrained backbone and train only the new classifier head
base_model = ResNet50V2(weights='imagenet', include_top=False)
for layer in base_model.layers:
    layer.trainable = False

model = Sequential([
    RandomFlip('horizontal'),
    RandomTranslation(0.2, 0.2),
    RandomRotation(0.2),
    RandomZoom(0.2),
    base_model,
    Flatten(),
    Dense(256, activation='relu'),
    Dropout(0.5),
    Dense(3, activation='softmax')
])

model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# Standalone copy of the augmentation pipeline, used only for visualization
augmentation_model = Sequential([
    RandomFlip('horizontal'),
    RandomTranslation(0.2, 0.2),
    RandomRotation(0.2),
    RandomZoom(0.2)
])

augmented_images = []
for img in x_train_raw[:8]:
    img = img.reshape((1,) + img.shape)
    # training=True activates the random layers, which are no-ops at inference
    augmented_image = augmentation_model(img, training=True)
    augmented_images.append(augmented_image.numpy()[0])

show_images(augmented_images, "Augmented Training Images")

model.fit(x_train, y_train_encoded, validation_data=(x_test, y_test_encoded), epochs=5, batch_size=32)
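
With two training images and one test image the model cannot learn anything meaningful, but the class probabilities can still be inspected. A sketch assuming the fitted model above:

class_names = ['arctic fox', 'walrus', 'polar bear']  # matches labels 0, 1, 2
for p in model.predict(x_test):
    print(class_names[int(np.argmax(p))], p)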
