# GPT-2 feature extraction + Keras CNN/GRU classification pipeline (script fragment)
import numpy as np
import matplotlib.pyplot as plt
import torch
from sklearn.metrics import confusion_matrix
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.layers import (
    Input,
    Embedding,
    Conv1D,
    MaxPooling1D,
    GRU,
    Dense,
    Dropout,
    Concatenate,
)
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping
from transformers import GPT2Tokenizer, GPT2Model
print('import done')
# Define hyperparameters
MAX_SEQ_LENGTH = 100      # maximum tokens per input sequence
MAX_NB_WORDS = 20000      # vocabulary size cap for the embedding layer
EMBEDDING_DIM = 100       # dimensionality of each word-embedding vector
FILTER_SIZES = [3, 5, 7]  # Conv1D kernel widths (presumably parallel branches — verify against model build)
NUM_FILTERS = 256         # number of filters per Conv1D layer
GRU_UNITS = 256           # hidden units in the GRU layer
DENSE_UNITS = 1           # single output unit — presumably binary classification; confirm with the loss used
DROPOUT_RATE = 0.8        # NOTE(review): 0.8 is an unusually aggressive dropout rate — confirm intended
print('config done')
def extract_gpt_features(text):
    """Encode ``text`` with GPT-2 and return the model's hidden-state features.

    Relies on names defined elsewhere in this file: ``tokenizer`` (a
    ``GPT2Tokenizer``), ``gpt_model`` (a ``GPT2Model``) and ``device``.

    Args:
        text: Raw input string to featurize.

    Returns:
        torch.Tensor: the first element of the GPT-2 model output
        (presumably the last hidden state, shape ``(1, seq_len, hidden_dim)``
        — confirm against the installed transformers version). Computed with
        gradients disabled.
    """
    inputs = tokenizer.encode_plus(text, add_special_tokens=True,
                                   return_tensors="pt")
    # Move input tensors (ids / attention mask) onto the model's device.
    inputs = inputs.to(device)
    with torch.no_grad():  # inference only — no autograd graph needed
        outputs = gpt_model(**inputs)[0]
    # BUG FIX: the original computed `outputs` but never returned it, so the
    # function always returned None and callers got no features.
    return outputs
# Plot training vs. validation accuracy per epoch from the Keras History object.
for curve in ('accuracy', 'val_accuracy'):
    plt.plot(history.history[curve])
plt.title('Model Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend(['Train', 'Validation'], loc='upper left')
plt.show()