Jari

This C# code processes video frames from a webcam to detect and count fingers. It applies skin detection and contour extraction to isolate the hand region, then uses convexity defects (the gaps between the hand contour and its convex hull) to detect the fingers. The finger count is displayed on the form, and the main processing steps, filtering, contour extraction, and finger counting, are performed in the ProcessFramAndUpdateGUI method, which is called each time a new video frame is available.

Uploaded by Slamet Dhatank

using System;

using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using Emgu.CV;
using Emgu.CV.CvEnum;
using Emgu.CV.Structure;
using Emgu.CV.UI;
using System.Runtime.InteropServices;
namespace objecttracking_openCV
{
public partial class Form1 : Form
{
Capture CapWebCam;
Seq<Point> Hull;
Image<Bgr, byte> imgOrignal;
Seq<MCvConvexityDefect> defects;
MCvConvexityDefect[] defectArray;
MCvBox2D box;
MemStorage storage = new MemStorage();
//bool button_pressed = false;
public Form1()
{
InitializeComponent();
}
private void Form1_Load(object sender, EventArgs e)
{
//trying to capture a video input device
try
{
CapWebCam = new Capture();
}
catch (NullReferenceException except)
{
label3.Text = except.Message;
return;
}
Application.Idle += ProcessFramAndUpdateGUI;
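//Application.Idle fires whenever the UI message queue is empty, so a new frame is grabbed and processed as often as the form allows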
}
private void Form1_FormClosed(object sender, FormClosedEventArgs e)
{
if (CapWebCam != null)
{
CapWebCam.Dispose();
}
}
void ProcessFramAndUpdateGUI(object Sender, EventArgs agr)
{
int Finger_num = 0;
Double Result1 = 0;
Double Result2 = 0;
//querying image
imgOrignal = CapWebCam.QueryFrame();
if (imgOrignal == null) return;
//Applying YCrCb filter
Image<Ycc, Byte> currentYCrCbFrame = imgOrignal.Convert<Ycc, byte>();
Image<Gray, byte> skin = new Image<Gray, byte>(imgOrignal.Width, imgOrignal.Height);
skin = currentYCrCbFrame.InRange(new Ycc(0, 131, 80), new Ycc(255, 185, 135));
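//InRange produces a binary mask: pixels whose Cr/Cb values fall inside the given range (likely skin) become 255, all others 0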
StructuringElementEx rect_12 = new StructuringElementEx(10, 10, 5, 5, Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_RECT);
//Eroding the source image using the specified structuring element
CvInvoke.cvErode(skin, skin, rect_12, 1);
StructuringElementEx rect_6 = new StructuringElementEx(6, 6, 3, 3, Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_RECT);
//dilating the source image using the specified structuring element
CvInvoke.cvDilate(skin, skin, rect_6, 2);
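//eroding and then dilating the mask (a morphological opening) removes small noise specks while roughly preserving the hand shape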
skin = skin.Flip(FLIP.HORIZONTAL);
//smoothing the filtered, eroded and dilated image.
skin = skin.SmoothGaussian(9);
imgOrignal = imgOrignal.Flip(FLIP.HORIZONTAL);
//extracting contours.
Contour<Point> contours = skin.FindContours();
Contour<Point> biggestContour = null;
//extracting the biggest contour.
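//FindContours returns the contours as a linked list; HNext steps to the next contour, so this loop keeps the one with the largest area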
while (contours != null)
{
Result1 = contours.Area;
if (Result1 > Result2)
{
Result2 = Result1;
biggestContour = contours;
}
contours = contours.HNext;
}
//applying the convexity defect algorithm to find the count of fingers
if (biggestContour != null)
{
Finger_num = 0;
biggestContour = biggestContour.ApproxPoly(0.00025);
imgOrignal.Draw(biggestContour, new Bgr(Color.LimeGreen), 2);
Hull = biggestContour.GetConvexHull(ORIENTATION.CV_CLOCKWISE);
defects = biggestContour.GetConvexityDefacts(storage, ORIENTATION.CV_CLOCKWISE);
imgOrignal.DrawPolyline(Hull.ToArray(), true, new Bgr(0, 0, 255), 2);
box = biggestContour.GetMinAreaRect();
defectArray = defects.ToArray();
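//each convexity defect has a start and end point on the convex hull and a depth point, the contour point farthest inside the hull (typically the valley between two fingers)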
for (int i = 0; i < defects.Total; i++)
{
PointF startPoint = new PointF((float)defectArray[i].StartPoint.X, (float)defectArray[i].StartPoint.Y);
PointF depthPoint = new PointF((float)defectArray[i].DepthPoint.X, (float)defectArray[i].DepthPoint.Y);
PointF endPoint = new PointF((float)defectArray[i].EndPoint.X, (float)defectArray[i].EndPoint.Y);
CircleF startCircle = new CircleF(startPoint, 5f);
CircleF depthCircle = new CircleF(depthPoint, 5f);
CircleF endCircle = new CircleF(endPoint, 5f);
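//a defect counts as a finger when its start or depth point lies above the bounding box centre (smaller Y), the start point is above the depth point, and the two are more than box.size.Height / 6.5 apart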
if ((startCircle.Center.Y < box.center.Y || depthCircle.Center.Y < box.center.Y) &&
(startCircle.Center.Y < depthCircle.Center.Y) &&
(Math.Sqrt(Math.Pow(startCircle.Center.X - depthCircle.Center.X, 2) + Math.Pow(startCircle.Center.Y - depthCircle.Center.Y, 2)) > box.size.Height / 6.5))
{
Finger_num++;
}
}
label2.Text = Finger_num.ToString(); // updating finger count
}
// Finding the center of contour
{
MCvMoments moment = new MCvMoments(); // a new MCvMoments object
try
{
moment = biggestContour.GetMoments(); // Moments of biggestContour
}
catch (NullReferenceException except)
{
label3.Text = except.Message;
return;
}
CvInvoke.cvMoments(biggestContour, ref moment, 0);
double m_00 = CvInvoke.cvGetSpatialMoment(ref moment, 0, 0);
double m_10 = CvInvoke.cvGetSpatialMoment(ref moment, 1, 0);
double m_01 = CvInvoke.cvGetSpatialMoment(ref moment, 0, 1);
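//the contour centroid is (m_10 / m_00, m_01 / m_00); the extra division by 10 scales it down, presumably for tracking code elsewhere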
int current_X = Convert.ToInt32(m_10 / m_00) / 10; // X location of centre of contour
int current_Y = Convert.ToInt32(m_01 / m_00) / 10; // Y location of center of contour
iborignal.Image = imgOrignal;
}
}
private void iborignal_Click(object sender, EventArgs e)
{
}
}
}
