pythonprogram

The document contains Python code for several machine learning tasks using TensorFlow: training a SimpleRNN classifier on the MNIST digit dataset, building a sequence-to-sequence LSTM model for English-to-French translation, classifying movie reviews from the IMDB dataset with an LSTM, and predicting the next value of a toy time series with both LSTM and SimpleRNN layers. Each program demonstrates data preprocessing, model creation, training, and evaluation.


import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import SimpleRNN, Dense
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical

# Load MNIST and normalize pixel values to [0, 1]
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

# Convert labels to one-hot encoding
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)

# Reshape for RNN input: each 28x28 image becomes a sequence of 28 rows
# with 28 features per step
n_timesteps = 28
n_features = 28
x_train = x_train.reshape(-1, n_timesteps, n_features)
x_test = x_test.reshape(-1, n_timesteps, n_features)

model = Sequential([
    SimpleRNN(128, activation='relu', input_shape=(n_timesteps, n_features)),
    Dense(64, activation='relu'),
    Dense(10, activation='softmax')
])

model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()

model.fit(x_train, y_train, epochs=10, batch_size=64, validation_data=(x_test, y_test))

loss, accuracy = model.evaluate(x_test, y_test)
print(f'Test Accuracy: {accuracy * 100:.2f}%')

# Display one test image together with the model's predicted digit
def predict_sample(index):
    sample = x_test[index].reshape(1, n_timesteps, n_features)
    prediction = model.predict(sample)
    predicted_label = np.argmax(prediction)
    plt.imshow(x_test[index].reshape(28, 28), cmap='gray')
    plt.title(f'Predicted: {predicted_label}')
    plt.show()

predict_sample(0)
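A single accuracy number hides which digits the row-by-row RNN actually confuses. As a quick check, a confusion matrix over the test set can be computed directly from the model's predictions; this is a minimal sketch assuming the trained model and the reshaped x_test/y_test from above are still in scope.

# Predict the whole test set at once and compare argmax labels
probs = model.predict(x_test, verbose=0)
pred_labels = np.argmax(probs, axis=1)
true_labels = np.argmax(y_test, axis=1)   # y_test is one-hot at this point

# 10x10 confusion matrix: rows = true digit, columns = predicted digit
confusion = np.zeros((10, 10), dtype=int)
for t, p in zip(true_labels, pred_labels):
    confusion[t, p] += 1
print(confusion)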

!pip install tensorflow numpy pandas nltk

import numpy as np
import tensorflow as tf
import pandas as pd
import nltk
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, LSTM, Dense, Embedding

# Sample parallel corpus
english_sentences = ["Hello, how are you?", "I am fine.", "What is your name?"]
french_sentences = ["Bonjour, comment ça va?", "Je vais bien.", "Quel est ton nom?"]

# Tokenizing English sentences
tokenizer_eng = Tokenizer()
tokenizer_eng.fit_on_texts(english_sentences)
eng_sequences = tokenizer_eng.texts_to_sequences(english_sentences)
eng_padded = pad_sequences(eng_sequences, padding='post')

# Tokenizing French sentences
tokenizer_fr = Tokenizer()
tokenizer_fr.fit_on_texts(french_sentences)
fr_sequences = tokenizer_fr.texts_to_sequences(french_sentences)
fr_padded = pad_sequences(fr_sequences, padding='post')

# Encoder: embed the English sequence and keep only the final LSTM states
encoder_inputs = Input(shape=(None,))
enc_emb_layer = Embedding(input_dim=len(tokenizer_eng.word_index) + 1, output_dim=64)
enc_emb = enc_emb_layer(encoder_inputs)
encoder_RNN = LSTM(64, return_state=True)
_, state_h, state_c = encoder_RNN(enc_emb)
encoder_states = [state_h, state_c]

# Decoder: initialized from the encoder states, emits one token
# distribution per timestep
decoder_inputs = Input(shape=(None,))
dec_emb_layer = Embedding(input_dim=len(tokenizer_fr.word_index) + 1, output_dim=64)
dec_emb = dec_emb_layer(decoder_inputs)
decoder_RNN = LSTM(64, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_RNN(dec_emb, initial_state=encoder_states)
decoder_dense = Dense(len(tokenizer_fr.word_index) + 1, activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)

# Define model
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.summary()

# Teacher forcing: the decoder sees the French sequence shifted one step to
# the right and is trained to predict the next token at every position
decoder_input_data = fr_padded[:, :-1]
decoder_target_data = fr_padded[:, 1:]
model.fit([eng_padded, decoder_input_data], decoder_target_data, batch_size=32, epochs=10,
          validation_split=0.2)

# Crude translation helper: it reuses the first training decoder input, so
# it only illustrates the model's API (see the step-by-step decoder below)
def translate_sentence(sentence):
    sequence = tokenizer_eng.texts_to_sequences([sentence])
    sequence = pad_sequences(sequence, maxlen=eng_padded.shape[1], padding='post')
    prediction = model.predict([sequence, decoder_input_data[:1]])
    # index_word maps token ids back to words; id 0 is padding
    predicted_ids = np.argmax(prediction[0], axis=-1)
    return ' '.join(tokenizer_fr.index_word.get(int(i), '') for i in predicted_ids)

print(translate_sentence("Hello, how are you?"))
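Proper inference for a model like this decodes step by step: run the encoder once, then repeatedly feed the decoder its own previous prediction while carrying the LSTM states forward. Below is a minimal sketch of that loop reusing the trained layers above; the decode_greedy name and the choice of seed token are illustrative assumptions, since this toy corpus has no dedicated start/end tokens.

# Inference-time encoder: English sequence -> final LSTM states
encoder_model = Model(encoder_inputs, encoder_states)

# Inference-time decoder: one previous token plus states in, next-token
# distribution plus updated states out
state_h_in = Input(shape=(64,))
state_c_in = Input(shape=(64,))
step_emb = dec_emb_layer(decoder_inputs)
step_out, h_out, c_out = decoder_RNN(step_emb, initial_state=[state_h_in, state_c_in])
step_probs = decoder_dense(step_out)
decoder_model = Model([decoder_inputs, state_h_in, state_c_in],
                      [step_probs, h_out, c_out])

def decode_greedy(sentence, max_len=10):
    seq = pad_sequences(tokenizer_eng.texts_to_sequences([sentence]),
                        maxlen=eng_padded.shape[1], padding='post')
    h, c = encoder_model.predict(seq, verbose=0)
    # Seed with the first French token id (assumption: no <start> token here)
    token = np.array([[fr_padded[0, 0]]])
    words = []
    for _ in range(max_len):
        probs, h, c = decoder_model.predict([token, h, c], verbose=0)
        next_id = int(np.argmax(probs[0, -1]))
        if next_id == 0:          # padding id: stop decoding
            break
        words.append(tokenizer_fr.index_word.get(next_id, ''))
        token = np.array([[next_id]])
    return ' '.join(words)

print(decode_greedy("Hello, how are you?"))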

import numpy as np
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Embedding

# Keep the 10,000 most frequent words and cap reviews at 500 tokens
max_features = 10000
maxlen = 500

(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=max_features)
X_train = pad_sequences(X_train, maxlen=maxlen)
X_test = pad_sequences(X_test, maxlen=maxlen)

model = Sequential()
model.add(Embedding(max_features, 32))
model.add(LSTM(32))
model.add(Dense(1, activation='sigmoid'))   # binary sentiment: positive/negative

model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(X_train, y_train, epochs=10, batch_size=64, validation_data=(X_test, y_test))

loss, accuracy = model.evaluate(X_test, y_test)
print(f'Loss: {loss}, Accuracy: {accuracy}')
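The IMDB arrays contain integer word ids rather than text, so it is often useful to decode a review back into words when sanity-checking the preprocessing. A small sketch using imdb.get_word_index(); by default load_data offsets every id by 3, reserving 0, 1, and 2 for padding, start-of-sequence, and out-of-vocabulary markers. The decode_review helper is an illustrative name.

# Map ids back to words; load_data shifted all ids up by 3
word_index = imdb.get_word_index()
index_word = {i + 3: w for w, i in word_index.items()}
index_word[0], index_word[1], index_word[2] = '<pad>', '<start>', '<oov>'

def decode_review(ids):
    return ' '.join(index_word.get(int(i), '?') for i in ids)

print(decode_review(X_train[0]))   # first training review (padded at the front)
print('label:', y_train[0])        # 1 = positive, 0 = negative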


import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense

# 100 identical toy sequences 0..9: the first nine values are the input,
# the final value (9) is the target
data = np.array([[i for i in range(10)] for _ in range(100)])
X, y = data[:, :-1], data[:, -1]

# LSTM input must be 3-D: (samples, timesteps, features)
X = X.reshape((X.shape[0], X.shape[1], 1))

model = Sequential()
model.add(LSTM(50, activation='relu', input_shape=(9, 1)))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')
model.fit(X, y, epochs=10, verbose=0)

# Note: this input lies outside the training data (which only ever saw
# values 0..8), so the prediction is an extrapolation
test_input = np.array([7, 8, 9, 10, 11, 12, 13, 14, 15])
test_input = test_input.reshape((1, 9, 1))
print(test_input)

predicted_value = model.predict(test_input, verbose=0)
print(f'Predicted value: {predicted_value[0][0]}')
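Every row in the toy dataset above is the same 0-to-9 sequence, so the network only has to memorize a constant target. For a meaningful next-value task, the usual approach is to slide a fixed-size window over one long series so that each sample has a distinct context and target. A minimal sketch of that windowing, using an arbitrary linear series purely for illustration:

# Build (window -> next value) pairs from a single long series
series = np.arange(200, dtype=float)
window = 9
X_w = np.array([series[i:i + window] for i in range(len(series) - window)])
y_w = series[window:]
X_w = X_w.reshape((X_w.shape[0], window, 1))   # (samples, timesteps, features)
print(X_w.shape, y_w.shape)                    # (191, 9, 1) (191,)

The same model definitions above can then be fit on X_w and y_w, and the earlier test_input becomes an in-distribution sample rather than an extrapolation.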

import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import SimpleRNN, Dense

# Same toy dataset as above, with a SimpleRNN in place of the LSTM
data = np.array([[i for i in range(10)] for _ in range(100)])
X, y = data[:, :-1], data[:, -1]
X = X.reshape((X.shape[0], X.shape[1], 1))

model = Sequential()
model.add(SimpleRNN(50, activation='relu', input_shape=(9, 1)))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')
model.fit(X, y, epochs=10, verbose=0)

test_input = np.array([7, 8, 9, 10, 11, 12, 13, 14, 15])
test_input = test_input.reshape((1, 9, 1))
print(test_input)

predicted_value = model.predict(test_input, verbose=0)
print(f'Predicted value: {predicted_value[0][0]}')
