AI Lab Programs
# stop word removal using NLTK
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize

example_sent = "This is a sample sentence, showing off the stop words filtration."
stop_words = set(stopwords.words('english'))
word_tokens = word_tokenize(example_sent)

# filter once with lowercase conversion and once without
filtered_lower = [w for w in word_tokens if w.lower() not in stop_words]
filtered = [w for w in word_tokens if w not in stop_words]

print("Original Sentence:", example_sent)
print("Word Tokens:", word_tokens)
print("Filtered Sentence (with lowercase conversion):", filtered_lower)
print("Filtered Sentence (without lowercase conversion):", filtered)
Output:
Original Sentence: This is a sample sentence, showing off the stop words filtration.
Word Tokens: ['This', 'is', 'a', 'sample', 'sentence', ',', 'showing', 'off', 'the', 'stop', 'words', 'filtration', '.']
Filtered Sentence (with lowercase conversion): ['sample', 'sentence', ',', 'showing', 'stop', 'words', 'filtration', '.']
Filtered Sentence (without lowercase conversion): ['This', 'sample', 'sentence', ',', 'showing', 'stop', 'words', 'filtration', '.']
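The two filtered sentences differ because NLTK's English stop word list stores only lowercase forms, so a capitalized token like 'This' matches the list only after lowercasing. A minimal check, assuming the stopwords corpus is already downloaded:

from nltk.corpus import stopwords

stop_words = set(stopwords.words('english'))
print('this' in stop_words)   # True: the list stores lowercase forms
print('This' in stop_words)   # False: so 'This' survives without .lower()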
# import these modules
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize

ps = PorterStemmer()

# choose some words to be stemmed
words = ["program", "programs", "programmer", "programming", "programmers"]

for w in words:
    print(w, ":", ps.stem(w))
Output:
program : program
programs : program
programmer : programm
programming : program
programmers : programm
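The listing imports word_tokenize but never uses it; the natural follow-up is to stem every token of a sentence rather than a fixed word list. A minimal sketch using the stemmer defined above (the sentence is illustrative):

sentence = "Programmers program with programming languages"
for w in word_tokenize(sentence):
    print(w, ":", ps.stem(w))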
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize, sent_tokenize

nltk.download('stopwords')
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')

# sample text (as reflected in the output below)
txt = ("Sukanya, Rajib and Naba are my good friends. "
       "Marriage is a big step in one’s life.It is both exciting and frightening.")

stop_words = set(stopwords.words('english'))

# split into sentences, drop stop words, then POS-tag each sentence
tokenized = sent_tokenize(txt)
for i in tokenized:
    wordsList = nltk.word_tokenize(i)
    wordsList = [w.lower() for w in wordsList if not w.lower() in stop_words]
    tagged = nltk.pos_tag(wordsList)
    print(tagged)
Output:
[nltk_data] Downloading package stopwords to /root/nltk_data...
[('sukanya', 'NN'), (',', ','), ('rajib', 'VB'), ('naba', 'RB'), ('good', 'JJ'), ('friends', 'NNS'), ('.', '.')]
[('marriage', 'NN'), ('big', 'JJ'), ('step', 'NN'), ('one', 'CD'), ('’', 'NN'), ('life.it', 'NN'), ('exciting', 'VBG'), ('frightening', 'NN'), ('.', '.')]
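The tags follow the Penn Treebank tagset (NN = singular noun, JJ = adjective, NNS = plural noun, and so on). NLTK can describe any tag itself; a minimal sketch, assuming the 'tagsets' resource is downloaded:

import nltk
nltk.download('tagsets')
nltk.help.upenn_tagset('NN')   # prints the definition and examples for NN
nltk.help.upenn_tagset('JJ')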
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score

# create four Gaussian blobs as a toy classification dataset
n_samples = 200
blob_centers = ([1, 1], [3, 4], [1, 3.3], [3.5, 1.8])
data, labels = make_blobs(n_samples=n_samples, centers=blob_centers,
                          cluster_std=0.5, random_state=0)

# plot the blobs, one colour per class (colour choices are illustrative)
colours = ('green', 'orange', 'blue', 'magenta')
fig, ax = plt.subplots()
for n_class in range(len(blob_centers)):
    ax.scatter(data[labels == n_class][:, 0],
               data[labels == n_class][:, 1],
               c=colours[n_class], s=30, label=str(n_class))

# split the data, then fit an MLP (hyperparameters are typical choices)
train_data, test_data, train_labels, test_labels = train_test_split(
    data, labels, train_size=0.8, random_state=42)
clf = MLPClassifier(solver='lbfgs', alpha=1e-5,
                    hidden_layer_sizes=(6,), random_state=1)
clf.fit(train_data, train_labels)

print("score on train data:", clf.score(train_data, train_labels))
predictions_train = clf.predict(train_data)
predictions_test = clf.predict(test_data)
print("first 20 train predictions:", predictions_train[:20])
print("train accuracy:", accuracy_score(train_labels, predictions_train))
print("test accuracy:", accuracy_score(test_labels, predictions_test))

# inspect the learned weights, layer by layer
for i in range(len(clf.coefs_)):
    number_neurons_in_layer = clf.coefs_[i].shape[1]
    for j in range(number_neurons_in_layer):
        weights = clf.coefs_[i][:, j]
        print(i, j, weights, end=", ")
    print()

# classify a few new points and inspect the class probabilities
new_points = [[0, 0], [0, 1], [1, 0], [0, 1], [1, 1],
              [2., 2.], [1.3, 1.3], [2, 4.8]]
result = clf.predict(new_points)
prob_results = clf.predict_proba(new_points)
print(result)
print(prob_results)
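To see which blobs the classifier confuses, a confusion matrix over the test predictions is a quick check; a minimal sketch using the names from the listing above:

from sklearn.metrics import confusion_matrix

# rows = true class, columns = predicted class
print(confusion_matrix(test_labels, predictions_test))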
# Install NLTK
!pip install nltk

from nltk.chat.util import Chat, reflections

# pattern/response pairs; only the "sorry" and "quit" rules survive in the
# original listing, the others here are illustrative examples
pairs = [
    [r"my name is (.*)", ["Hello %1, how are you today?"]],
    [r"hi|hey|hello", ["Hello!", "Hey there!"]],
    [r"how are you ?", ["I'm doing good. How about you?"]],
    [r"sorry (.*)", ["It's alright.", "No problem."]],
    [r"quit", ["Bye! Take care."]],
]

chatbot = Chat(pairs, reflections)

# Start chatting
chatbot.converse()
Output:
Requirement already satisfied: nltk in /usr/local/lib/python3.10/dist-packages (3.8.1)
Requirement already satisfied: click in /usr/local/lib/python3.10/dist-packages (from nltk) (8.1.7)
Requirement already satisfied: joblib in /usr/local/lib/python3.10/dist-packages (from nltk) (1.3.2)
Requirement already satisfied: regex>=2021.8.3 in /usr/local/lib/python3.10/dist-packages (from nltk) (2023.12.25)
Requirement already satisfied: tqdm in /usr/local/lib/python3.10/dist-packages (from nltk) (4.66.1)
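Besides the interactive converse() loop, NLTK's Chat object can be driven one message at a time with respond(), which is handy for quick testing; a minimal sketch using the chatbot defined above:

print(chatbot.respond("sorry about that"))   # matches the r"sorry (.*)" rule
print(chatbot.respond("quit"))               # "Bye! Take care."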
import random

def generate_number():
    # pick the secret number between 1 and 100
    return random.randint(1, 100)

def play_game():
    number_to_guess = generate_number()
    attempts = 0
    print("Welcome to the Guessing Game!")
    print("I have chosen a number between 1 and 100. Try to guess it.")
    while True:
        guess = input("Enter your guess (or 'exit' to quit): ")
        if guess.lower() == "exit":
            break
        try:
            guess = int(guess)
            attempts += 1
            if guess < number_to_guess:
                print("Too low! Try again.")
            elif guess > number_to_guess:
                print("Too high! Try again.")
            else:
                print(f"Congratulations! You guessed the number in {attempts} attempts.")
                break
        except ValueError:
            print("Please enter a valid number or 'exit'.")

play_game()
Output:
Welcome to the Guessing Game!
I have chosen a number between 1 and 100. Try to guess it.
Enter your guess (or 'exit' to quit): 35
Too low! Try again.
Enter your guess (or 'exit' to quit): 56
Too high! Try again.
Enter your guess (or 'exit' to quit): 33
Too low! Try again.
Enter your guess (or 'exit' to quit): 45
Too high! Try again.
Enter your guess (or 'exit' to quit): 40
Congratulations! You guessed the number in 5 attempts.
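Because generate_number draws from random, every run picks a different secret number; seeding the generator makes a run reproducible, which helps when testing the game logic. A minimal sketch, assuming the definitions above:

random.seed(42)
print(generate_number())  # same value on every run once seeded
random.seed(42)
print(generate_number())  # identical to the line above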