
Code Try2

The document outlines a Flask application for real-time emotion detection using a webcam feed. It includes a main app file (app.py) that handles video streaming and emotion retrieval, an emotion detection module (emotion.py) that uses DeepFace for analyzing emotions, and an HTML front-end (index.html) for user interaction. The application displays detected emotions and notifies users of changes through a popup interface.
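For orientation, the four files below are assumed to be laid out as app.py and emotion.py in the project root, with index.html under templates/ and style.css under static/ (Flask's default folders). Here is a minimal sketch of a smoke test for the JSON endpoint using Flask's built-in test client; the file layout and the installed packages (flask, opencv-python, deepface) are assumptions, not part of the original document:

# Minimal smoke test for the /get_emotion endpoint.
from app import app

with app.test_client() as client:
    resp = client.get('/get_emotion')
    # Before any frame has been analyzed, last_detected_emotion is None,
    # so this prints {'emotion': None}.
    print(resp.get_json())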


app.py:

from flask import Flask, render_template, Response, jsonify
import cv2
from emotion import get_emotion

app = Flask(__name__)

last_detected_emotion = None

def generate_frames():
    global last_detected_emotion
    cap = cv2.VideoCapture(0)  # open the default webcam
    while True:
        success, frame = cap.read()
        if not success:
            break
        else:
            emotion = get_emotion(frame)
            if emotion:
                last_detected_emotion = emotion
                # Overlay the detected emotion on the frame
                cv2.putText(frame, f"Emotion: {emotion}", (10, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

            # Encode the frame as JPEG and emit it as one multipart chunk
            ret, buffer = cv2.imencode('.jpg', frame)
            frame = buffer.tobytes()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

@app.route('/')
def index():
    return render_template('index.html')

@app.route('/video_feed')
def video_feed():
    return Response(generate_frames(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')

@app.route('/get_emotion')
def get_current_emotion():
    return jsonify({'emotion': last_detected_emotion})

if __name__ == '__main__':
    app.run(debug=True)
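The /video_feed route streams Motion JPEG: the multipart/x-mixed-replace MIME type tells the browser to keep the connection open and replace the displayed image each time a new part arrives, with parts separated by the declared frame boundary. As a rough sketch of what a non-browser client sees, the snippet below reads the stream and splits out individual JPEGs; the host and port assume Flask's default development server, and the snippet is an illustration, not part of the original code:

import requests

# Read the MJPEG stream and extract complete JPEG images by scanning
# for the JPEG start (\xff\xd8) and end (\xff\xd9) markers.
stream = requests.get('http://127.0.0.1:5000/video_feed', stream=True)
buffer = b''
for chunk in stream.iter_content(chunk_size=1024):
    buffer += chunk
    start = buffer.find(b'\xff\xd8')
    end = buffer.find(b'\xff\xd9')
    if start != -1 and end != -1 and end > start:
        jpeg = buffer[start:end + 2]
        buffer = buffer[end + 2:]
        print(f"Received frame of {len(jpeg)} bytes")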

emotion.py:

import cv2
from deepface import DeepFace
import time
import logging
import threading

class EmotionDetector:
    def __init__(self):
        # Haar cascade for fast face localization before DeepFace analysis
        self.face_cascade = cv2.CascadeClassifier(
            cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
        self.confidence_threshold = 0.7
        self.time_window = 3  # seconds over which votes are accumulated
        self.emotion_votes = {}
        self.last_detected_emotion = None
        self.last_emotion_time = 0
        self.lock = threading.Lock()

    def detect_emotion(self, frame):
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = self.face_cascade.detectMultiScale(
            gray_frame, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))

        if len(faces) == 0:
            return None

        # Analyze only the first detected face
        x, y, w, h = faces[0]
        face_roi = frame[y:y + h, x:x + w]

        try:
            result = DeepFace.analyze(face_roi, actions=['emotion'],
                                      enforce_detection=False)
            if isinstance(result, list):
                result = result[0]

            emotion = result['dominant_emotion']
            confidence = result['emotion'][emotion]

            if confidence >= self.confidence_threshold:
                # Accumulate confidence-weighted votes per emotion
                with self.lock:
                    if emotion in self.emotion_votes:
                        self.emotion_votes[emotion] += confidence
                    else:
                        self.emotion_votes[emotion] = confidence

            current_time = time.time()
            if current_time - self.last_emotion_time >= self.time_window:
                with self.lock:
                    if self.emotion_votes:
                        # Report the emotion with the highest vote total
                        final_emotion = max(self.emotion_votes,
                                            key=self.emotion_votes.get)
                        self.emotion_votes = {}
                        self.last_detected_emotion = final_emotion
                        self.last_emotion_time = current_time
                        logging.info(f"Final emotion: {final_emotion}")
                        return final_emotion

        except Exception as e:
            logging.error(f"Error in emotion detection: {e}")

        return None

# Initialize logging
logging.basicConfig(filename='emotion_detection.log', level=logging.INFO)

# Create a global instance of EmotionDetector
emotion_detector = EmotionDetector()

def get_emotion(frame):
    return emotion_detector.detect_emotion(frame)
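The detector smooths noisy per-frame predictions by accumulating confidence-weighted votes for up to three seconds (time_window) and then reporting the emotion with the largest total. A self-contained illustration of that voting step, with sample confidences invented for the example:

# Each high-confidence prediction adds its confidence to that
# emotion's running total; the highest total wins the window.
votes = {}
for emotion, confidence in [('happy', 0.8), ('neutral', 0.75), ('happy', 0.9)]:
    votes[emotion] = votes.get(emotion, 0) + confidence

print(max(votes, key=votes.get))  # 'happy' wins, 1.7 vs 0.75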

templates/index.html:

<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <link rel="stylesheet" href="/static/style.css">
    <title>Real-time Emotion Detection</title>
    <style>
        #popup {
            display: none;
            position: fixed;
            top: 20px;
            right: 20px;
            padding: 15px;
            background-color: #4CAF50;
            color: white;
            border-radius: 5px;
            z-index: 1000;
        }
    </style>
</head>
<body>
    <h1>Real-time Emotion Detection</h1>
    <div id="videoContainer">
        <img id="videoFeed" src="" alt="Video Feed" style="display:none;">
    </div>
    <div id="detectedEmotion"></div>
    <button id="detectEmotionBtn">Detect Emotion</button>
    <div id="popup"></div>

    <script>
        let lastEmotion = null;

        document.getElementById("detectEmotionBtn").onclick = function() {
            startDetection();
        };

        function showEmotion(emotion) {
            document.getElementById("detectedEmotion").textContent =
                `Detected Emotion: ${emotion}`;
            if (emotion !== lastEmotion) {
                showPopup(`Emotion change detected: ${emotion}`);
                lastEmotion = emotion;
            }
        }

        function showPopup(message) {
            const popup = document.getElementById("popup");
            popup.textContent = message;
            popup.style.display = "block";
            setTimeout(() => {
                popup.style.display = "none";
            }, 3000); // Hide popup after 3 seconds
        }

        function startDetection() {
            const videoFeed = document.getElementById("videoFeed");
            videoFeed.style.display = "block"; // Show the video feed
            videoFeed.src = "/video_feed"; // Start the video feed
            console.log("Emotion detection started!");
            startEmotionPolling();
        }

        function startEmotionPolling() {
            setInterval(() => {
                fetch('/get_emotion')
                    .then(response => response.json())
                    .then(data => {
                        if (data.emotion) {
                            showEmotion(data.emotion);
                        }
                    })
                    .catch(error => console.error('Error:', error));
            }, 1000); // Poll every second
        }
    </script>
</body>
</html>

static/style.css:

body {
    font-family: Arial, sans-serif;
    display: flex;
    flex-direction: column;
    align-items: center;
    background-color: #f0f0f0;
}

h1 {
    margin-top: 20px;
}

#videoContainer {
    margin-top: 20px;
    border: 2px solid #000;
    width: 640px; /* Adjust width as needed */
}

img {
    width: 100%; /* Make the video frame responsive */
}
