import React, { useRef, useEffect, useState } from "react";
import axios from "axios";
export default function DetectPlates() {
const videoRef = useRef(null);
const canvasRef = useRef(null);
const overlayRef = useRef(null);
const [detections, setDetections] = useState([]);
const [isDetecting, setIsDetecting] = useState(false);
  useEffect(() => {
    // Request access to the webcam and attach the stream to the <video> element
    let stream;
    async function startWebcam() {
      try {
        stream = await navigator.mediaDevices.getUserMedia({ video: true });
        if (videoRef.current) {
          videoRef.current.srcObject = stream;
          await videoRef.current.play();
        }
      } catch (err) {
        console.error("Error accessing webcam:", err);
      }
    }
    startWebcam();
    // Stop the camera tracks on unmount so the device is released
    return () => {
      if (stream) stream.getTracks().forEach((track) => track.stop());
    };
  }, []);
useEffect(() => {
    // While detection is toggled on, capture and send a frame at a fixed interval
let detectInterval;
if (isDetecting) {
detectInterval = setInterval(() => {
captureFrameAndDetect();
}, 1000); // Capture + detect every 1 second
}
// Cleanup
return () => {
if (detectInterval) clearInterval(detectInterval);
};
}, [isDetecting]);
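  // NOTE: 1000 ms is an arbitrary cadence chosen to keep backend load modest.
  // Lowering it gives smoother tracking but multiplies requests; if the
  // backend proxies to a hosted inference API, check its rate limits first.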
  const captureFrameAndDetect = async () => {
    const video = videoRef.current;
    // Skip frames until the video reports real dimensions (metadata loaded)
    if (!video || !canvasRef.current || video.videoWidth === 0) return;
    // Draw the current video frame onto the hidden capture canvas
    const canvas = canvasRef.current;
    const ctx = canvas.getContext("2d");
    canvas.width = video.videoWidth;
    canvas.height = video.videoHeight;
    ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
    // Encode the frame as a base64 JPEG data URL
    // (the string includes the "data:image/jpeg;base64," prefix)
    const dataURL = canvas.toDataURL("image/jpeg");
    try {
      // Send the base64 frame to the Node.js backend proxy
      // (see the endpoint sketch in the comment at the bottom of this file)
      const response = await axios.post("/api/detect", {
        base64Image: dataURL,
      });
      // Store the returned predictions in local state
      setDetections(response.data.predictions || []);
} catch (err) {
console.error("Detection error: ", err);
}
};
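  // Base64 inflates the payload by roughly a third. As an alternative sketch
  // (assuming a backend that accepts multipart uploads, which the one sketched
  // below does not), the frame could be posted as a binary Blob instead:
  //
  //   canvas.toBlob((blob) => {
  //     const form = new FormData();
  //     form.append("image", blob, "frame.jpg");
  //     axios.post("/api/detect", form);
  //   }, "image/jpeg", 0.8);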
// Once detections are set, we want to draw bounding boxes
  useEffect(() => {
    if (!overlayRef.current || !videoRef.current) return;
    const overlayCanvas = overlayRef.current;
    const ctx = overlayCanvas.getContext("2d");
    // Match the overlay's internal resolution to the video, so the pixel
    // coordinates returned by the backend line up with the frame we sent
    overlayCanvas.width = videoRef.current.videoWidth;
    overlayCanvas.height = videoRef.current.videoHeight;
    // Clear previous drawings (assigning width/height above also resets the
    // canvas, but clearing explicitly keeps the intent obvious)
    ctx.clearRect(0, 0, overlayCanvas.width, overlayCanvas.height);
    // The detection data from a Roboflow-style model is assumed to look like:
    // detections = [
    //   {
    //     x: centerX,        // box center, in pixels of the submitted frame
    //     y: centerY,
    //     width: boxWidth,
    //     height: boxHeight,
    //     class: "license_plate",
    //     // ...
    //   },
    // ];
detections.forEach((prediction) => {
      // Boxes arrive center-based; convert to a top-left corner for canvas drawing
      const { x, y, width, height } = prediction;
      const left = x - width / 2;
      const top = y - height / 2;
// Draw the bounding box
ctx.beginPath();
ctx.rect(left, top, width, height);
ctx.lineWidth = 2;
ctx.strokeStyle = "red";
ctx.fillStyle = "rgba(255, 0, 0, 0.1)";
ctx.fill();
ctx.stroke();
      // Draw the class label, clamped so it stays visible at the top edge
      if (prediction.class) {
        ctx.fillStyle = "red";
        ctx.font = "16px Arial";
        ctx.fillText(prediction.class, left, Math.max(top - 5, 16));
      }
});
}, [detections]);
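  // NOTE: the overlay is styled to 640x480 via CSS while its internal
  // resolution matches the captured frame, so the browser scales boxes and
  // video by the same factor and no manual coordinate scaling is needed.
  // If the overlay ever used a different internal resolution than the frame
  // sent for detection, each box would need scaling first, e.g. (hypothetical):
  //
  //   const scaleX = overlayCanvas.width / frameWidth;
  //   const scaleY = overlayCanvas.height / frameHeight;
  //   ctx.rect(left * scaleX, top * scaleY, width * scaleX, height * scaleY);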
return (
<div style={{ textAlign: "center" }}>
<h1>Real-Time Plate Detection</h1>
<div style={{ position: "relative", display: "inline-block" }}>
{/* The live video feed */}
        <video ref={videoRef} muted playsInline style={{ width: 640, height: 480 }} />
{/* Overlay canvas for bounding boxes */}
<canvas
ref={overlayRef}
style={{
position: "absolute",
top: 0,
left: 0,
width: 640,
height: 480,
pointerEvents: "none"
}}
/>
</div>
{/* Hidden canvas for capturing frames */}
<canvas ref={canvasRef} style={{ display: "none" }} />
<div style={{ marginTop: 20 }}>
{!isDetecting ? (
<button onClick={() => setIsDetecting(true)}>Start Detection</button>
) : (
<button onClick={() => setIsDetecting(false)}>Stop Detection</button>
)}
</div>
</div>
);
}
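/*
 * A minimal sketch of the Node.js endpoint this component assumes.
 * The route name, model id, and env var names are placeholders -- substitute
 * your own. Roboflow's hosted inference API expects the raw base64 string
 * (without the "data:image/jpeg;base64," prefix) as the POST body.
 *
 *   const express = require("express");
 *   const axios = require("axios");
 *   const app = express();
 *   app.use(express.json({ limit: "10mb" })); // base64 frames can be large
 *
 *   app.post("/api/detect", async (req, res) => {
 *     try {
 *       // Strip the data-URL prefix before forwarding to the model
 *       const base64 = req.body.base64Image.split(",")[1];
 *       const rfRes = await axios.post(
 *         // e.g. "license-plates/1" -- hypothetical model id + version
 *         `https://detect.roboflow.com/${process.env.ROBOFLOW_MODEL}`,
 *         base64,
 *         {
 *           params: { api_key: process.env.ROBOFLOW_API_KEY },
 *           headers: { "Content-Type": "application/x-www-form-urlencoded" },
 *         }
 *       );
 *       res.json(rfRes.data); // contains the predictions array drawn above
 *     } catch (err) {
 *       console.error("Detection proxy error:", err.message);
 *       res.status(500).json({ error: "Detection failed" });
 *     }
 *   });
 *
 *   app.listen(3001);
 */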