NN - DL Project
---------------------------------------------------------------------------------------------------------------------------
import shutil
---------------------------------------------------------------------------------------------------------------------------
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
import matplotlib.pyplot as plt
---------------------------------------------------------------------------------------------------------------------------
# Image and batch dimensions used by the generators and the model
img_width, img_height = 48, 48   # assumed face-image size; adjust to your dataset
batch_size = 32

train_dir = '/content/mood_dataset/train'
test_dir = '/content/mood_dataset/test'

# Augment the training images; only rescale the test images
train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=20,
    zoom_range=0.2,
    horizontal_flip=True
)
test_datagen = ImageDataGenerator(rescale=1./255)

train_data = train_datagen.flow_from_directory(
    train_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='categorical'
)
test_data = test_datagen.flow_from_directory(
    test_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='categorical'
)
---------------------------------------------------------------------------------------------------------------------------
# Simple CNN: one conv/pool block followed by a small dense classifier
model = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=(img_width, img_height, 3)),
    MaxPooling2D(2, 2),
    Flatten(),
    Dense(128, activation='relu'),
    Dropout(0.3),
    Dense(train_data.num_classes, activation='softmax')
])

# Compile the model so it can be trained with fit() below
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
---------------------------------------------------------------------------------------------------------------------------
history = model.fit(
    train_data,
    epochs=10,
    validation_data=test_data
)
---------------------------------------------------------------------------------------------------------------------------
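To visualize training, a small sketch using the matplotlib import above and the history object returned by model.fit (assumes accuracy was tracked as a metric, as in the compile step):
# Plot training vs. validation accuracy per epoch
plt.plot(history.history['accuracy'], label='train accuracy')
plt.plot(history.history['val_accuracy'], label='val accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
---------------------------------------------------------------------------------------------------------------------------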
model.save('/content/mood_model.h5')
---------------------------------------------------------------------------------------------------------------------------
mood_to_music = {
    "happy": ["https://youtu.be/ZbZSe6N_BXs", "https://youtu.be/60ItHLz5WEA"],
    "sad": ["https://youtu.be/4N3N1MlvVc4"],
    "angry": ["https://youtu.be/kXYiU_JCYtU"],
    "neutral": ["https://youtu.be/fLexgOxsZu0"],
    "surprise": ["https://youtu.be/34Na4j8AVgA"]
}

# Later use:
# predicted_mood = "happy"
# for link in mood_to_music[predicted_mood]:
#     print(link)
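The commented example above can also pick a single track at random; a minimal sketch (predicted_mood is a placeholder until the classifier supplies it):
import random
predicted_mood = "happy"  # placeholder; replace with the model's prediction
print(random.choice(mood_to_music[predicted_mood]))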
---------------------------------------------------------------------------------------------------------------------------
import cv2
import numpy as np
from tensorflow.keras.models import load_model
from IPython.display import display, Javascript, Image
from google.colab.output import eval_js
from base64 import b64decode

# Browser-side JavaScript (passed to display(Javascript(...))) captures a webcam
# frame in Colab. Only the key lines of that snippet survive here:
#   document.body.appendChild(div);
#   div.appendChild(video);
#   video.srcObject = stream;
#   await video.play();

# Predict the mood of the captured face region
model = load_model('/content/mood_model.h5')
emotion_labels = list(train_data.class_indices.keys())  # class names in training order

face = cv2.resize(face_image, (img_width, img_height)) / 255.0  # face_image: cropped face from the captured frame
face = np.expand_dims(face, axis=0)                             # add the batch dimension
prediction = model.predict(face)
mood_index = np.argmax(prediction)
mood = emotion_labels[mood_index]
---------------------------------------------------------------------------------------------------------------------------
!pip install face_recognition
!apt-get install -y cmake                        # CMake is required to build dlib
!pip install dlib --upgrade --force-reinstall    # Reinstall dlib (optional)

import cv2
import numpy as np
from tensorflow.keras.models import load_model
import face_recognition

model = load_model('/content/mood_model.h5')
frame = cv2.imread('sample.jpg')  # replace 'sample.jpg' with the path to your test image

if frame is None:
    print("Error: Could not load sample image.")
else:
    # Find all the faces in the current frame
    face_locations = face_recognition.face_locations(frame)
    # Loop through each face found in the frame
    for (top, right, bottom, left) in face_locations:
        # Extract the face region and predict its mood
        face_image = frame[top:bottom, left:right]
        face = cv2.resize(face_image, (img_width, img_height)) / 255.0
        prediction = model.predict(np.expand_dims(face, axis=0))
        mood = emotion_labels[np.argmax(prediction)]
        # Draw a box around the face and label it with the predicted mood
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        cv2.putText(frame, mood, (left + 6, bottom - 6), cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255), 1)
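To view the annotated result in Colab, where cv2.imshow does not work, one option is the cv2_imshow helper; a minimal sketch, placed after the detection loop:
from google.colab.patches import cv2_imshow
cv2_imshow(frame)  # show the frame with face boxes and mood labels
---------------------------------------------------------------------------------------------------------------------------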
# __len__ method of the replay buffer class (reports how many stored transitions it holds)
def __len__(self):
    return len(self.buffer)

# DQN hyperparameters and training bookkeeping
BATCH_SIZE = 64
GAMMA = 0.99
EPSILON_START = 1.0
EPSILON_END = 0.01
EPSILON_DECAY = 5000
TARGET_UPDATE = 1000

epsilon = EPSILON_START
step_count = 0
num_episodes = 300
episode_rewards = []
# Inside the episode loop: take the chosen action and observe the outcome
reward = env.act(actions[action])
next_state = get_state()
done = env.game_over()

# Inside the optimization step: convert a sampled minibatch of transitions to tensors
states = torch.FloatTensor(states).to(device)
actions_b = torch.LongTensor(actions_b).unsqueeze(1).to(device)
rewards = torch.FloatTensor(rewards).unsqueeze(1).to(device)
next_states = torch.FloatTensor(next_states).to(device)
dones = torch.FloatTensor(dones).unsqueeze(1).to(device)
# Periodically sync the target network with the policy network
if step_count % TARGET_UPDATE == 0:
    target_net.load_state_dict(policy_net.state_dict())

# End of each episode: record and report progress
episode_rewards.append(total_reward)
print(f"Episode {episode+1}, Reward: {total_reward:.2f}, Epsilon: {epsilon:.4f}")
✅ After you've trained and saved the model (flappy_dqn.pth), you can load and test it by having the agent play the game with the trained weights.
Since Google Colab can't show a real-time PyGame window, the agent plays silently and prints its results. You can also log scores or record a GIF if needed.
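A minimal loading sketch to run before the test loop below, assuming policy_net was built with the same architecture used in training; test_episodes and test_rewards are introduced here for the loop:
policy_net.load_state_dict(torch.load('flappy_dqn.pth', map_location=device))
policy_net.eval()   # evaluation mode: act greedily, no exploration
test_episodes = 10  # number of evaluation episodes (adjust as needed)
test_rewards = []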
for ep in range(test_episodes):
    env.reset_game()
    state = get_state()
    total_reward = 0
    # Step loop (action selection elided): act greedily with the trained network
    reward = env.act(actions[action])
    state = get_state()
    total_reward += reward
    test_rewards.append(total_reward)
    print(f"Test Episode {ep+1} Reward: {total_reward:.2f}")
🧪 Output Example
You’ll see something like:
Test Episode 1 Reward: 12.0
Test Episode 2 Reward: 15.0
...
This confirms the agent is playing with its learned policy!
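To summarize the run, the rewards collected in the loop above can be averaged; a one-line sketch:
print(f"Average test reward over {test_episodes} episodes: {np.mean(test_rewards):.2f}")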