Traffic Sign Detection in CSharp
<font color=green> This project is part of the Emgu.CV.Example solution </font>
System Requirement
Component | Requirement | Detail |
---|---|---|
Emgu CV | Version 2.0.0.0 Alpha | |
Operating System | Cross Platform | |
Traffic Sign Detection
Traffic sign detection is a crucial component of an autonomous vehicle navigation system. For an automobile to navigate itself safely in an urban environment, it must be able to understand traffic signs:
- It should be able to read speed limit signs, so that it does not receive tickets for speeding and pay a premium on its insurance
- It should be able to read traffic lights and stop on red
- It should be able to read stop signs and yield to other vehicles crossing the same intersection.
- ...
This tutorial addresses a small part of an autonomous vehicle navigation system: detecting stop signs in images captured by a camera.
Stop Sign Detection
The first task in implementing a stop sign detection algorithm is to understand what a stop sign looks like in North America.
A North American stop sign is a red octagon with "STOP" written in the center. Given this information, we design the stop sign detection as a two-pass algorithm:
- In the first pass, we extract candidate regions that look like red octagons.
- In the second pass, we use SURF to match features in each candidate region against a model stop sign image; if enough good matches are found, the candidate is accepted as a stop sign (a minimal sketch of this matching step follows the list).
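The decision rule of the second pass can be illustrated on its own. The snippet below is a minimal sketch that reuses the same Emgu CV 2.x calls and thresholds as the complete listing further down; the class name SurfMatchSketch, the helper LooksLikeStopSign, and the idea of passing in two grayscale images are illustrative only and not part of the original example.
<source lang="csharp">
using System;
using Emgu.CV;
using Emgu.CV.Structure;

public static class SurfMatchSketch
{
   //Decide whether a candidate region matches the stop sign model by counting
   //"good" SURF feature matches, using the same thresholds as the full listing.
   public static bool LooksLikeStopSign(Image<Gray, Byte> model, Image<Gray, Byte> candidate)
   {
      MCvSURFParams surfParam = new MCvSURFParams(500, false);

      //build a tracker from the model's SURF features
      SURFTracker tracker = new SURFTracker(model.ExtractSURF(ref surfParam));

      //match the candidate's features against the model
      SURFFeature[] features = candidate.ExtractSURF(ref surfParam);
      SURFTracker.MatchedSURFFeature[] matchedFeatures = tracker.MatchFeature(features, 2, 20);

      //count matches whose nearest neighbour is close enough
      int goodMatchCount = 0;
      foreach (SURFTracker.MatchedSURFFeature ms in matchedFeatures)
         if (ms.Distances[0] < 0.5) goodMatchCount++;

      tracker.Dispose();

      //require a minimum number of good matches
      return goodMatchCount >= 10;
   }
}
</source>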
Complete Source code
<source lang="csharp"> using System; using System.Collections.Generic; using System.Text; using System.Drawing; using Emgu.CV; using Emgu.CV.Structure; using Emgu.Util; using System.Diagnostics;
namespace TrafficSignRecognition {
public class StopSignDetector : DisposableObject { private SURFTracker _tracker; private MCvSURFParams _surfParam; private MemStorage _octagonStorage; private Contour<Point> _octagon;
      public StopSignDetector()
      {
         _surfParam = new MCvSURFParams(500, false);

         //extract SURF features from the red pixels of the stop sign model image
         using (Image<Bgr, Byte> stopSignModel = new Image<Bgr, Byte>("stop-sign-model.png"))
         using (Image<Gray, Byte> redMask = GetRedPixelMask(stopSignModel))
         {
            _tracker = new SURFTracker(redMask.ExtractSURF(ref _surfParam));
         }

         //an octagon template used for shape matching against candidate contours
         _octagonStorage = new MemStorage();
         _octagon = new Contour<Point>(_octagonStorage);
         _octagon.PushMulti(new Point[] {
            new Point(1, 0),
            new Point(2, 0),
            new Point(3, 1),
            new Point(3, 2),
            new Point(2, 3),
            new Point(1, 3),
            new Point(0, 2),
            new Point(0, 1)},
            Emgu.CV.CvEnum.BACK_OR_FRONT.FRONT);
      }
      /// <summary>
      /// Compute the red pixel mask for the given image.
      /// A red pixel is a pixel where: hue < 20 OR hue > 160, AND saturation > 10
      /// </summary>
      /// <param name="image">The color image to find the red mask from</param>
      /// <returns>The red pixel mask</returns>
      private static Image<Gray, Byte> GetRedPixelMask(Image<Bgr, byte> image)
      {
         using (Image<Hsv, Byte> hsv = image.Convert<Hsv, Byte>())
         {
            Image<Gray, Byte>[] channels = hsv.Split();

            //channels[0] is the mask for hue less than 20 or larger than 160
            CvInvoke.cvInRangeS(channels[0], new MCvScalar(20), new MCvScalar(160), channels[0]);
            channels[0]._Not();

            //channels[1] is the mask for saturation of at least 10, mainly used to filter out white pixels
            channels[1]._ThresholdBinary(new Gray(10), new Gray(255.0));

            CvInvoke.cvAnd(channels[0], channels[1], channels[0], IntPtr.Zero);

            channels[1].Dispose();
            channels[2].Dispose();
            return channels[0];
         }
      }
      /// <summary>
      /// Recursively examine the contours: keep those that are large enough and
      /// octagon-like, then confirm each candidate with SURF feature matching.
      /// </summary>
      private void FindStopSign(Image<Bgr, byte> img, List<Image<Gray, Byte>> stopSignList, List<Rectangle> boxList, Contour<Point> contours)
      {
         for (; contours != null; contours = contours.HNext)
         {
            contours.ApproxPoly(contours.Perimeter * 0.02, 0, contours.Storage);
            if (contours.Area > 200)
            {
               double ratio = CvInvoke.cvMatchShapes(_octagon, contours, Emgu.CV.CvEnum.CONTOURS_MATCH_TYPE.CV_CONTOURS_MATCH_I3, 0);

               if (ratio > 0.1) //not a good match of contour shape
               {
                  Contour<Point> child = contours.VNext;
                  if (child != null)
                     FindStopSign(img, stopSignList, boxList, child);
                  continue;
               }

               Rectangle box = contours.BoundingRectangle;

               Image<Gray, Byte> candidate;
               using (Image<Bgr, Byte> tmp = img.Copy(box))
                  candidate = tmp.Convert<Gray, byte>();

               //set the value of pixels not in the contour region to zero
               using (Image<Gray, Byte> mask = new Image<Gray, byte>(box.Size))
               {
                  mask.Draw(contours, new Gray(255), new Gray(255), 0, -1, new Point(-box.X, -box.Y));

                  double mean = CvInvoke.cvAvg(candidate, mask).v0;
                  candidate._ThresholdBinary(new Gray(mean), new Gray(255.0));
                  candidate._Not();
                  mask._Not();
                  candidate.SetValue(0, mask);
               }

               SURFFeature[] features = candidate.ExtractSURF(ref _surfParam);

               SURFTracker.MatchedSURFFeature[] matchedFeatures = _tracker.MatchFeature(features, 2, 20);

               int goodMatchCount = 0;
               foreach (SURFTracker.MatchedSURFFeature ms in matchedFeatures)
                  if (ms.Distances[0] < 0.5) goodMatchCount++;

               if (goodMatchCount >= 10)
               {
                  boxList.Add(box);
                  stopSignList.Add(candidate);
               }
            }
         }
      }
      public void DetectStopSign(Image<Bgr, byte> img, List<Image<Gray, Byte>> stopSignList, List<Rectangle> boxList)
      {
         Image<Bgr, Byte> smoothImg = img.SmoothGaussian(5, 5, 1.5, 1.5);
         Image<Gray, Byte> smoothedRedMask = GetRedPixelMask(smoothImg);
         smoothedRedMask._Dilate(1);
         smoothedRedMask._Erode(1);
         using (Image<Gray, Byte> canny = smoothedRedMask.Erode(3).Dilate(3).Canny(new Gray(100), new Gray(50)))
         using (MemStorage stor = new MemStorage())
         {
            Contour<Point> contours = canny.FindContours(
               Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
               Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_TREE,
               stor);
            FindStopSign(img, stopSignList, boxList, contours);
         }
      }
      protected override void DisposeObject()
      {
         _tracker.Dispose();
         _octagonStorage.Dispose();
      }
   }
}
</source>
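For completeness, here is a hedged sketch of how the class above might be called from a console application. The class name Program and the file name "street-scene.png" are placeholders and not part of the original example; the detector itself still expects "stop-sign-model.png" to be available at run time, as in its constructor.
<source lang="csharp">
using System;
using System.Collections.Generic;
using System.Drawing;
using Emgu.CV;
using Emgu.CV.Structure;
using TrafficSignRecognition;

public static class Program
{
   public static void Main()
   {
      using (StopSignDetector detector = new StopSignDetector())
      using (Image<Bgr, Byte> scene = new Image<Bgr, Byte>("street-scene.png"))
      {
         List<Image<Gray, Byte>> stopSignList = new List<Image<Gray, Byte>>();
         List<Rectangle> boxList = new List<Rectangle>();

         detector.DetectStopSign(scene, stopSignList, boxList);

         //highlight every detected stop sign in the scene
         foreach (Rectangle box in boxList)
            scene.Draw(box, new Bgr(Color.Red), 2);

         Console.WriteLine("{0} stop sign(s) detected", boxList.Count);
      }
   }
}
</source>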