DL 7
Program:
import tensorflow as tf
import numpy as np
# Vocabulary size and maximum target-sequence length for the toy example.
num_decoder_tokens = 5
max_decoder_seq_length = 5
# NOTE(review): encoder_input_data, decoder_input_data and decoder_target_data
# are assigned into below but never allocated in this excerpt — the
# np.zeros((num_samples, seq_len, num_tokens)) setup lines appear to be
# missing from the extraction. Confirm against the original program.
# One-hot encode the English source sentence "I am learning"
# (sample 0, timesteps 0..2, token indices 0..2).
encoder_input_data[0, 0, 0] = 1 # "I"
encoder_input_data[0, 1, 1] = 1 # "am"
encoder_input_data[0, 2, 2] = 1 # "learning"
# Decoder input begins with the start-of-sequence token (teacher forcing).
decoder_input_data[0, 0, 0] = 1 # "<START>"
# Target is the decoder input shifted by one timestep.
decoder_target_data[0, 0, 1] = 1 # "J'apprends"
decoder_target_data[0, 1, 2] = 1 # "<END>"
# Encoder Model
# NOTE(review): the encoder definition (encoder Input, LSTM with
# return_state=True, and the encoder_states = [state_h, state_c] line)
# is missing from this excerpt.
# Decoder Model
# The decoder LSTM is primed with the encoder's final hidden/cell states;
# its per-timestep states are discarded during training.
decoder_outputs, _, _ = decoder_lstm(decoder_inputs,
initial_state=encoder_states)
# Dense softmax layer projects decoder outputs onto the target vocabulary.
decoder_outputs = decoder_dense(decoder_outputs)
# Full Model
# NOTE(review): the Model([encoder_inputs, decoder_inputs], decoder_outputs)
# construction line is missing from this excerpt.
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
# NOTE(review): orphaned continuation — the opening
# model.fit([encoder_input_data, decoder_input_data], decoder_target_data,
# line appears to be missing from the extraction.
batch_size=1, epochs=10)
# Inference (sampling) setup: separate Inputs feed the previous decoder
# hidden/cell states back in one timestep at a time.
# NOTE(review): latent_dim is used here but not defined in this excerpt.
decoder_state_input_h = Input(shape=(latent_dim,))
decoder_state_input_c = Input(shape=(latent_dim,))
# NOTE(review): orphaned continuation — the opening
# decoder_outputs, state_h, state_c = decoder_lstm( line (and the
# decoder_states_inputs / decoder_states list definitions) appear to be
# missing from the extraction.
decoder_inputs, initial_state=decoder_states_inputs)
decoder_outputs = decoder_dense(decoder_outputs)
# NOTE(review): orphaned continuation — presumably the tail of
# decoder_model = Model([decoder_inputs] + decoder_states_inputs, ...);
# the opening line is missing from the extraction.
[decoder_outputs] + decoder_states)
def encode_input_sentence(sentence):
# NOTE(review): the body that one-hot encodes `sentence` into an
# encoder_input array is missing from this excerpt (indentation has also
# been stripped by the extraction); `encoder_input` is otherwise undefined.
return encoder_input
def decode_sequence(input_seq):
# Run the encoder once to obtain the initial [h, c] states for sampling.
states_value = encoder_model.predict(input_seq)
decoded_sentence = ''
stop_condition = False
# NOTE(review): the sampling loop is garbled — the `while not
# stop_condition:` header, the decoder_model.predict call, the np.argmax
# token-sampling line, and the stop-condition test (sampled <END> or max
# length reached) appear to be missing from the extraction. The remaining
# lines have also lost their indentation.
# Map the sampled token index back to a word; unknown indices become <UNK>.
sampled_word = reverse_target_token_index.get(sampled_token_index,
'<UNK>')
stop_condition = True
else:
decoded_sentence += ' ' + sampled_word
# Feed the decoder's new states back in for the next timestep.
states_value = [h, c]
return decoded_sentence.strip()
# Driver: encode the source sentence and greedily decode a translation.
# NOTE(review): input_sentence is used here but never assigned in this
# excerpt — its definition (presumably "I am learning") appears to be
# missing from the extraction.
input_seq = encode_input_sentence(input_sentence)
translated_sentence = decode_sequence(input_seq)
print(f"Input: {input_sentence}")
print(f"Translated: {translated_sentence}")
Output: