Motion Detection cars !

Motion Detection cars !

Postby fullrose » Wed Mar 21, 2012 11:41 pm

Picture: https://picasaweb.google.com/1182736579 ... 3266365250
Hi! I'm a new member of the Emgu CV forums.
I want to draw a rectangle on the captured picture and run the motion detection only inside that rectangle, like in the picture above.
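For reference, a minimal sketch of limiting the differencing to one rectangle (this assumes the image, frame and _backgroundImage variables from the ProcessFrame code below; the zone coordinates are only illustrative):

Code: Select all
            // Hypothetical detection zone: only pixels inside it are differenced.
            Rectangle detectionZone = new Rectangle(100, 100, 320, 240);
            image.Draw(detectionZone, new Bgr(Color.Red), 2);            // show the zone on the colour frame
            Image<Gray, Byte> zone = frame.Copy(detectionZone);          // grayscale pixels inside the zone
            Image<Gray, Byte> zoneBackground = _backgroundImage.Copy(detectionZone);
            Image<Gray, Byte> zoneDiff = new Image<Gray, Byte>(detectionZone.Size);
            CvInvoke.cvAbsDiff(zoneBackground, zone, zoneDiff);          // motion only inside the rectangle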

Code: Select all
 
       private void ProcessFrame(object sender, EventArgs e)
        {
            // Get the current frame from the camera - color and gray
            Image<Bgr, Byte> originalFrame = _capture.QueryFrame();

            // This usually occurs when using a video file - after the last frame is read
            // the next frame is null
            if (originalFrame == null)
            {
                // Reset the camera since no frame was captured - for videos, restart the video playback
                ResetCamera();
                originalFrame = _capture.QueryFrame();
            }

            Image<Bgr, Byte> image = originalFrame.Resize(_frameWidth, _frameHeight,0);
            Image<Gray, Byte> frame = image.Convert<Gray, Byte>();

            // Perform differencing on them to find the "new introductions to the background" and "motions"
            Image<Gray, Byte> BgDifference = new Image<Gray, byte>(_frameWidth, _frameHeight);
            Image<Gray, Byte> FrameDifference = new Image<Gray, byte>(_frameWidth, _frameHeight);
            CvInvoke.cvAbsDiff(_backgroundImage, frame, BgDifference);
            CvInvoke.cvAbsDiff((_lastFrame == null) ? frame : _lastFrame, frame, FrameDifference);

            // Perform thresholding to remove noise and boost "new introductions"
            Image<Gray, Byte> thresholded = new Image<Gray, byte>(_frameWidth, _frameHeight);
            CvInvoke.cvThreshold(BgDifference, thresholded, 20, 255, THRESH.CV_THRESH_BINARY);

            // Perform erosion to remove camera noise
            Image<Gray, Byte> eroded = new Image<Gray, byte>(_frameWidth, _frameHeight);
            CvInvoke.cvErode(thresholded, eroded, IntPtr.Zero, 2);

            // Takes the thresholded image and looks for squares and draws the squares out on top of the current frame
            drawBoxes(eroded, image);

            // Put the captured frame in the imagebox
            capturedImageBox.Image = image;
            // Store the current frame in the _lastFrame variable - it becomes the last frame now
            _lastFrame = image.Convert<Gray, Byte>();

            // Draw the frame-to-frame difference (motion) on to the imgImageBox image box
            imgImageBox.Image = FrameDifference;

            // Draw the thresholded image in the motionImageBox image box - so that we can view it
            motionImageBox.Image = eroded;

            // Move the background close to the current frame
            if (_adaptiveBackground == true)
            {
                Image<Gray, Byte> newBackground = new Image<Gray, byte>(_frameWidth, _frameHeight);
                MoveToward(ref _backgroundImage, ref frame, ref newBackground, _backgroundAdaptionRate);
                _backgroundImage = newBackground;
            }
            grayImageBox.Image = _backgroundImage;
        }
     private void drawBoxes(Emgu.CV.Image<Gray, Byte> img, Emgu.CV.Image<Bgr, Byte> original)
        {

            Gray cannyThreshold = new Gray(180);
            Gray cannyThresholdLinking = new Gray(120);
            Gray circleAccumulatorThreshold = new Gray(120);

            Image<Gray, Byte> cannyEdges = img.Canny(cannyThreshold, cannyThresholdLinking);
            LineSegment2D[] lines = cannyEdges.HoughLinesBinary(
                2, //Distance resolution in pixel-related units
                Math.PI / 45.0, //Angle resolution measured in radians.
                20, //threshold
                30, //min Line width
                10 //gap between lines
                )[0]; //Get the lines from the first channel


            #region Find rectangles
            List<MCvBox2D> boxList = new List<MCvBox2D>();

            using (MemStorage storage = new MemStorage()) //allocate storage for contour approximation
                for (Contour<Point> contours = cannyEdges.FindContours(); contours != null; contours = contours.HNext)
                {
                    Contour<Point> currentContour = contours.ApproxPoly(contours.Perimeter * 0.05, storage);

                    if (contours.Area > 250) //only consider contours with area greater than 250
                    {
                        if (currentContour.Total == 4) //The contour has 4 vertices.
                        {
                            #region determine if all the angles in the contour are within the range of [80, 100] degree
                            bool isRectangle = true;
                            Point[] pts = currentContour.ToArray();
                            LineSegment2D[] edges = PointCollection.PolyLine(pts, true);

                            for (int i = 0; i < edges.Length; i++)
                            {
                                double angle = Math.Abs(
                                   edges[(i + 1) % edges.Length].GetExteriorAngleDegree(edges[i]));
                                if (angle < 80 || angle > 100)
                                {
                                    isRectangle = false;
                                    break;
                                }
                            }
                            #endregion

                            if (isRectangle) boxList.Add(currentContour.GetMinAreaRect());
                        }
                    }
                }
            #endregion

            #region draw rectangles
            Image<Bgr, Byte> rectangleImage = new Image<Bgr, byte>(img.Width, img.Height);
            foreach (MCvBox2D box in boxList)
            {
                rectangleImage.Draw(box, new Bgr(Color.DarkOrange), 2);
                original.Draw(box, new Bgr(Color.DarkOrange), 2);
            }

            capturedImageBox.Image = rectangleImage;
            #endregion
        }
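The MoveToward helper used above to adapt the background toward the current frame is not shown in the post. A minimal sketch, assuming it simply blends the two images at the given rate, could be:

Code: Select all
        // Assumed implementation: blend the current frame into the background,
        // i.e. result = (1 - rate) * background + rate * current
        private void MoveToward(ref Image<Gray, Byte> background, ref Image<Gray, Byte> current,
                                ref Image<Gray, Byte> result, double rate)
        {
            result = background.AddWeighted(current, 1.0 - rate, rate, 0);
        }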

Re: Motion Detection cars !

Postby Chris_Johnson » Thu Mar 22, 2012 2:08 pm

Hi,

Sorry, what is the problem? Isn't this section already drawing your rectangles? Could you expand on what you're after?

Code: Select all
            #region draw rectangles
            Image<Bgr, Byte> rectangleImage = new Image<Bgr, byte>(img.Width, img.Height);
            foreach (MCvBox2D box in boxList)
            {
                rectangleImage.Draw(box, new Bgr(Color.DarkOrange), 2);
                original.Draw(box, new Bgr(Color.DarkOrange), 2);
            }

            capturedImageBox.Image = rectangleImage;
            #endregion


Cheers,
Chris

Re: Motion Detection cars !

Postby fullrose » Thu Mar 22, 2012 8:27 pm

Here, the code detects rectangles, and the motion detection turns the moving objects white. I want to detect the cars and draw a rectangle around each car.
Please help me.

Cheers,

Thanks

Re: Motion Detection cars !

Postby fullrose » Mon Mar 26, 2012 10:53 pm

Hi,
can anyone help me with an answer? Please.

Re: Motion Detection cars !

Postby Chris_Johnson » Tue Mar 27, 2012 3:17 am

Hi,

OK, so the best choice is blob detection:

viewtopic.php?f=7&t=225
http://www.emgu.com/wiki/files/2.0.0.0/ ... 60d12a.htm

So why blob detection? Blob detection tracks by magnitude, i.e. you can set it to find the largest blob. Other methods such as SURF or template matching match on shape, which you can't do very well with a car (too many variations).
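A minimal sketch of that idea, assuming the eroded motion mask and the colour frame from the ProcessFrame code earlier in the thread (the method name here is only illustrative):

Code: Select all
        // Keep only the largest moving blob in the mask and draw its bounding box on the frame.
        private void DrawLargestBlob(Image<Gray, Byte> mask, Image<Bgr, Byte> frame)
        {
            using (MemStorage storage = new MemStorage())
            {
                Contour<Point> largest = null;
                for (Contour<Point> c = mask.FindContours(
                         Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
                         Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_EXTERNAL, storage);
                     c != null; c = c.HNext)
                {
                    if (largest == null || c.Area > largest.Area)
                        largest = c;
                }

                if (largest != null && largest.Area > 250) // ignore small noise blobs
                    frame.Draw(largest.BoundingRectangle, new Bgr(Color.DarkOrange), 2);
            }
        }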

Have a go and see how it turns out.

Cheers,
Chris

Re: Motion Detection cars !

Postby fullrose » Wed Apr 11, 2012 7:46 pm

Hi Chris_Johnson,
thank you so much.

One more issue: license plate recognition.

In the picture we can see one license plate being detected, but the picture actually contains two license plates.


Code: Select all
Image<Gray, byte> gray = image.Convert<Gray, Byte>();
Image<Gray, Byte> canny = new Image<Gray, byte>(gray.Size);

CvInvoke.cvCanny(gray, canny, 100, 50, 3);
List<MCvBox2D> boxList = new List<MCvBox2D>();
using (MemStorage stor = new MemStorage())
{
    Contour<Point> contours = canny.FindContours(
        Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
        Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST,
        stor);
    for (; contours != null; contours = contours.HNext)
    {
        if (contours.Area > 100)
        {
            // Draw the upright bounding rectangle of every sufficiently large contour
            Rectangle myRectangle1 = contours.BoundingRectangle;
            gray.Draw(myRectangle1, new Gray(255), 1);
        }
    }
}


Code: Select all
  private void FindLicensePlate(
         Contour<Point> contours, Image<Gray, Byte> gray, Image<Gray, Byte> canny,
         List<Image<Gray, Byte>> licensePlateImagesList, List<Image<Gray, Byte>> filteredLicensePlateImagesList, List<MCvBox2D> detectedLicensePlateRegionList,
         List<String> licenses)
      {
         for (; contours != null; contours = contours.HNext)
         {
            int numberOfChildren = GetNumberOfChildren(contours);     
            //if it does not contain any children (characters), it is not a license plate region
            if (numberOfChildren == 0) continue;

            if (contours.Area > 400)
            {
               if (numberOfChildren < 3)
               {
                  //If the contour has fewer than 3 children, it is not a license plate (assuming a license plate has at least 3 characters)
                  //However we should search the children of this contour to see if any of them is a license plate
                  FindLicensePlate(contours.VNext, gray, canny, licensePlateImagesList, filteredLicensePlateImagesList, detectedLicensePlateRegionList, licenses);
                  continue;
               }

               MCvBox2D box = contours.GetMinAreaRect();
               if (box.angle < -45.0)
               {
                  float tmp = box.size.Width;
                  box.size.Width = box.size.Height;
                  box.size.Height = tmp;
                  box.angle += 90.0f;
               }
               else if (box.angle > 45.0)
               {
                  float tmp = box.size.Width;
                  box.size.Width = box.size.Height;
                  box.size.Height = tmp;
                  box.angle -= 90.0f;
               }

               double whRatio = (double)box.size.Width / box.size.Height;
               if (!(0 < whRatio && whRatio < 2.0))
               //if (!(1.0 < whRatio && whRatio < 2.0))
               {  //if the width/height ratio is not in the specified range, it is not a license plate
                  //However we should search the children of this contour to see if any of them is a license plate
                  Contour<Point> child = contours.VNext;
                  if (child != null)
                     FindLicensePlate(child, gray, canny, licensePlateImagesList, filteredLicensePlateImagesList, detectedLicensePlateRegionList, licenses);
                  continue;
               }

               using (Image<Gray, Byte> tmp1 = gray.Copy(box))
               //resize the license plate so that the font height is ~ 10-12 px; this font size gives better accuracy from Tesseract
               using (Image<Gray, Byte> tmp2 = tmp1.Resize(240, 180, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC, true))
               {
                  //removes some pixels from the edge
                  int edgePixelSize = 2;
                  tmp2.ROI = new Rectangle(new Point(edgePixelSize, edgePixelSize), tmp2.Size - new Size(2 * edgePixelSize, 2 * edgePixelSize));
                  Image<Gray, Byte> plate = tmp2.Copy();

                  Image<Gray, Byte> filteredPlate = FilterPlate(plate);

                  Tesseract.Charactor[] words;
                  StringBuilder strBuilder = new StringBuilder();
                  using (Image<Gray, Byte> tmp = filteredPlate.Clone())
                  {
                     _ocr.Recognize(tmp);
                     words = _ocr.GetCharactors();
                     
                     if (words.Length == 0) continue;

                     for (int i = 0; i < words.Length; i++)
                     {
                        strBuilder.Append(words[i].Text);
                     }
                  }

                  licenses.Add(strBuilder.ToString());
                  licensePlateImagesList.Add(plate);
                  filteredLicensePlateImagesList.Add(filteredPlate);
                  detectedLicensePlateRegionList.Add(box);

               }
            }
         }
      }
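For completeness, this recursive function (which appears to be the one from the Emgu LicensePlateRecognition sample) needs the contour hierarchy, so a driver along these lines is assumed here (note CV_RETR_TREE so that VNext/children are available; the list names are just placeholders):

Code: Select all
      // Hypothetical driver: build the full contour tree once, then let
      // FindLicensePlate recurse through it collecting candidate plates.
      List<Image<Gray, Byte>> plateImages = new List<Image<Gray, Byte>>();
      List<Image<Gray, Byte>> filteredPlateImages = new List<Image<Gray, Byte>>();
      List<MCvBox2D> plateRegions = new List<MCvBox2D>();
      List<String> licenses = new List<String>();

      using (MemStorage stor = new MemStorage())
      {
         Contour<Point> contours = canny.FindContours(
            Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
            Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_TREE,   // tree retrieval so each contour knows its children
            stor);
         FindLicensePlate(contours, gray, canny, plateImages, filteredPlateImages, plateRegions, licenses);
      }

      foreach (MCvBox2D region in plateRegions)
         gray.Draw(region, new Gray(255), 2);   // mark every detected plate on the grayscale image

Note that a plate whose characters are not found as child contours (fewer than 3 children) or whose width/height ratio falls outside the tested range is skipped, which is one reason only one of the two plates may be detected.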

Re: Motion Detection cars !

Postby Tolis » Tue Sep 25, 2012 8:09 am

Try this, my friend! Just put it somewhere in your form class and call it.

Code: Select all
    private void ExtractContourAndHull(Image<Gray, byte> eroded) // pass your image after erosion into this function
    {
        using (MemStorage storage = new MemStorage())
        {
            // Find all contours in the eroded motion mask and keep the biggest one
            Contour<Point> contours = eroded.FindContours(
                Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
                Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST,
                storage);
            Contour<Point> biggestContour = null;

            Double currentArea = 0;
            Double biggestArea = 0;
            while (contours != null)
            {
                currentArea = contours.Area;
                if (currentArea > biggestArea)
                {
                    biggestArea = currentArea;
                    biggestContour = contours;
                }
                contours = contours.HNext;
            }

            if (biggestContour != null)
            {
                // Smooth the contour, then compute its convex hull and bounding boxes
                Contour<Point> currentContour = biggestContour.ApproxPoly(biggestContour.Perimeter * 0.0025, storage);
                biggestContour = currentContour;

                hull = biggestContour.GetConvexHull(Emgu.CV.CvEnum.ORIENTATION.CV_CLOCKWISE);
                box = biggestContour.GetMinAreaRect();
                PointF[] points = box.GetVertices();
                handRect = box.MinAreaRect();

                currentFrame.Draw(handRect, new Bgr(200, 50, 50), 5); // here I draw on my frame without any processing
                frame1 = capture.QueryFrame();
                frame1.Draw(handRect, new Bgr(0, 0, 230), 2);         // here the frame which gets the processing
                capturedImageBox.Image = frame1;
                frame1 = new Image<Bgr, byte>(_frameWidth, _frameHeight);

                Point[] ps = new Point[points.Length];
                for (int i = 0; i < points.Length; i++)
                    ps[i] = new Point((int)points[i].X, (int)points[i].Y);

                // Other shapes you could draw:
                //currentFrame.DrawPolyline(hull.ToArray(), true, new Bgr(200, 125, 75), 2);
                //currentFrame.Draw(new CircleF(new PointF(box.center.X, box.center.Y), 3), new Bgr(200, 125, 75), 2);

                //ellip.MCvBox2D = CvInvoke.cvFitEllipse2(biggestContour.Ptr);
                //currentFrame.Draw(new Ellipse(ellip.MCvBox2D), new Bgr(Color.LavenderBlush), 3);

                PointF center;
                float radius;
                //CvInvoke.cvMinEnclosingCircle(biggestContour.Ptr, out center, out radius);
                //currentFrame.Draw(new CircleF(center, radius), new Bgr(Color.Gold), 2);

                //currentFrame.Draw(new CircleF(new PointF(ellip.MCvBox2D.center.X, ellip.MCvBox2D.center.Y), 3), new Bgr(100, 25, 55), 2);
                //currentFrame.Draw(ellip, new Bgr(Color.DeepPink), 2);

                //CvInvoke.cvEllipse(currentFrame, new Point((int)ellip.MCvBox2D.center.X, (int)ellip.MCvBox2D.center.Y), new System.Drawing.Size((int)ellip.MCvBox2D.size.Width, (int)ellip.MCvBox2D.size.Height), ellip.MCvBox2D.angle, 0, 360, new MCvScalar(120, 233, 88), 1, Emgu.CV.CvEnum.LINE_TYPE.EIGHT_CONNECTED, 0);
                //currentFrame.Draw(new Ellipse(new PointF(box.center.X, box.center.Y), new SizeF(box.size.Height, box.size.Width), box.angle), new Bgr(0, 0, 0), 2);

                textBox1.Text = box.center.X.ToString();
                textBox2.Text = box.center.Y.ToString();

                // Keep only hull points that are far enough from the next hull point
                filteredHull = new Seq<Point>(storage);
                for (int i = 0; i < hull.Total; i++)
                {
                    // modulo so the last point is compared with the first (avoids an out-of-range index)
                    Point next = hull[(i + 1) % hull.Total];
                    if (Math.Sqrt(Math.Pow(hull[i].X - next.X, 2) + Math.Pow(hull[i].Y - next.Y, 2)) > box.size.Width / 10)
                    {
                        filteredHull.Push(hull[i]);
                    }
                }

                defects = biggestContour.GetConvexityDefacts(storage, Emgu.CV.CvEnum.ORIENTATION.CV_CLOCKWISE);
                defectArray = defects.ToArray();

                grayImageBox.Image = currentFrame;
                currentFrame = new Image<Bgr, byte>(_frameWidth, _frameHeight);
            }
        }
    }
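The snippet assumes several form-level fields that are not declared in the post. A minimal sketch of the declarations it relies on, with the types inferred from how the fields are used above (treat them as assumptions):

Code: Select all
        // Fields assumed by ExtractContourAndHull (types inferred from usage)
        Capture capture;                        // the camera/video capture, as in ProcessFrame
        Image<Bgr, Byte> currentFrame, frame1;  // frames the rectangles are drawn on
        Seq<Point> hull;                        // convex hull of the biggest contour
        Seq<Point> filteredHull;                // hull points with near-duplicate neighbours removed
        Seq<MCvConvexityDefect> defects;        // convexity defects of the biggest contour
        MCvConvexityDefect[] defectArray;
        MCvBox2D box;                           // minimum-area (rotated) rectangle around the contour
        Rectangle handRect;                     // upright bounding rectangle derived from box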

