SURF feature detector in CSharp

<font color=green>'''This project is part of the Emgu.CV.Example solution'''</font>
== System Requirement ==
{| style="text-align:center" border="1px" cellpadding="10" cellspacing="0"
!Component || Requirement || Detail
|-
|Emgu CV || [[Version_History#Emgu.CV-2.4.0|Version 2.4.0]] + ||
|-
|Operating System || Cross Platform ||
|}

== Source Code ==
=== Emgu CV 3.x ===
<div class="toccolours mw-collapsible mw-collapsed">
Click to view source code
<div class="mw-collapsible-content">
<source lang="csharp">
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Drawing;
using System.Runtime.InteropServices;
using Emgu.CV;
using Emgu.CV.CvEnum;
using Emgu.CV.Features2D;
using Emgu.CV.Structure;
using Emgu.CV.Util;
#if !__IOS__
using Emgu.CV.Cuda;
#endif
using Emgu.CV.XFeatures2D;

namespace SURFFeatureExample
{
   public static class DrawMatches
   {
      public static void FindMatch(Mat modelImage, Mat observedImage, out long matchTime, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, VectorOfVectorOfDMatch matches, out Mat mask, out Mat homography)
      {
         int k = 2;
         double uniquenessThreshold = 0.8;
         double hessianThresh = 300;

         Stopwatch watch;
         homography = null;

         modelKeyPoints = new VectorOfKeyPoint();
         observedKeyPoints = new VectorOfKeyPoint();

         #if !__IOS__
         if (CudaInvoke.HasCuda)
         {
            CudaSURF surfCuda = new CudaSURF((float) hessianThresh);
            using (GpuMat gpuModelImage = new GpuMat(modelImage))
            //extract features from the object image
            using (GpuMat gpuModelKeyPoints = surfCuda.DetectKeyPointsRaw(gpuModelImage, null))
            using (GpuMat gpuModelDescriptors = surfCuda.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))
            using (CudaBFMatcher matcher = new CudaBFMatcher(DistanceType.L2))
            {
               surfCuda.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);
               watch = Stopwatch.StartNew();

               // extract features from the observed image
               using (GpuMat gpuObservedImage = new GpuMat(observedImage))
               using (GpuMat gpuObservedKeyPoints = surfCuda.DetectKeyPointsRaw(gpuObservedImage, null))
               using (GpuMat gpuObservedDescriptors = surfCuda.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))
               //using (GpuMat tmp = new GpuMat())
               //using (Stream stream = new Stream())
               {
                  matcher.KnnMatch(gpuObservedDescriptors, gpuModelDescriptors, matches, k);

                  surfCuda.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);

                  mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                  mask.SetTo(new MCvScalar(255));
                  Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

                  int nonZeroCount = CvInvoke.CountNonZero(mask);
                  if (nonZeroCount >= 4)
                  {
                     nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                        matches, mask, 1.5, 20);
                     if (nonZeroCount >= 4)
                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                           observedKeyPoints, matches, mask, 2);
                  }
               }
               watch.Stop();
            }
         }
         else
         #endif
         {
            using (UMat uModelImage = modelImage.ToUMat(AccessType.Read))
            using (UMat uObservedImage = observedImage.ToUMat(AccessType.Read))
            {
               SURF surfCPU = new SURF(hessianThresh);
               //extract features from the object image
               UMat modelDescriptors = new UMat();
               surfCPU.DetectAndCompute(uModelImage, null, modelKeyPoints, modelDescriptors, false);

               watch = Stopwatch.StartNew();

               // extract features from the observed image
               UMat observedDescriptors = new UMat();
               surfCPU.DetectAndCompute(uObservedImage, null, observedKeyPoints, observedDescriptors, false);
               BFMatcher matcher = new BFMatcher(DistanceType.L2);
               matcher.Add(modelDescriptors);

               matcher.KnnMatch(observedDescriptors, matches, k, null);
               mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
               mask.SetTo(new MCvScalar(255));
               Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

               int nonZeroCount = CvInvoke.CountNonZero(mask);
               if (nonZeroCount >= 4)
               {
                  nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                     matches, mask, 1.5, 20);
                  if (nonZeroCount >= 4)
                     homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                        observedKeyPoints, matches, mask, 2);
               }

               watch.Stop();
            }
         }
         matchTime = watch.ElapsedMilliseconds;
      }

      /// <summary>
      /// Draw the model image and observed image, the matched features and homography projection.
      /// </summary>
      /// <param name="modelImage">The model image</param>
      /// <param name="observedImage">The observed image</param>
      /// <param name="matchTime">The output total time for computing the homography matrix.</param>
      /// <returns>The model image and observed image, the matched features and homography projection.</returns>
      public static Mat Draw(Mat modelImage, Mat observedImage, out long matchTime)
      {
         Mat homography;
         VectorOfKeyPoint modelKeyPoints;
         VectorOfKeyPoint observedKeyPoints;
         using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
         {
            Mat mask;
            FindMatch(modelImage, observedImage, out matchTime, out modelKeyPoints, out observedKeyPoints, matches,
               out mask, out homography);

            //Draw the matched keypoints
            Mat result = new Mat();
            Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
               matches, result, new MCvScalar(255, 255, 255), new MCvScalar(255, 255, 255), mask);

            #region draw the projected region on the image

            if (homography != null)
            {
               //draw a rectangle along the projected model
               Rectangle rect = new Rectangle(Point.Empty, modelImage.Size);
               PointF[] pts = new PointF[]
               {
                  new PointF(rect.Left, rect.Bottom),
                  new PointF(rect.Right, rect.Bottom),
                  new PointF(rect.Right, rect.Top),
                  new PointF(rect.Left, rect.Top)
               };
               pts = CvInvoke.PerspectiveTransform(pts, homography);

               Point[] points = Array.ConvertAll<PointF, Point>(pts, Point.Round);
               using (VectorOfPoint vp = new VectorOfPoint(points))
               {
                  CvInvoke.Polylines(result, vp, true, new MCvScalar(255, 0, 0, 255), 5);
               }
            }

            #endregion

            return result;
         }
      }
   }
}
</source>
</div>
</div>
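The Draw method above detects SURF keypoints in both images, matches them with a k-nearest-neighbour brute-force matcher (k = 2), filters the matches by uniqueness, size and orientation voting, estimates a RANSAC homography, and returns the annotated match image. Below is a minimal sketch of an entry point that might drive it, assuming the sample images box.png and box_in_scene.png used by the Emgu.CV.Example solution, the ImageViewer form from Emgu.CV.UI, and the Mat property that Emgu CV 3.x exposes on Image<Gray, Byte>; adapt it to your own build.
<source lang="csharp">
using System;
using Emgu.CV;
using Emgu.CV.Structure;
using Emgu.CV.UI;

namespace SURFFeatureExample
{
   static class Program
   {
      [STAThread]
      static void Main()
      {
         long matchTime;
         // Load the model (object) image and the observed (scene) image as grayscale images.
         // The file names are the sample images assumed to sit next to the executable.
         using (Image<Gray, Byte> modelImage = new Image<Gray, byte>("box.png"))
         using (Image<Gray, Byte> observedImage = new Image<Gray, byte>("box_in_scene.png"))
         using (Mat result = DrawMatches.Draw(modelImage.Mat, observedImage.Mat, out matchTime))
         {
            // Display the matched keypoints and the projected model region.
            ImageViewer.Show(result, String.Format("Matched in {0} milliseconds", matchTime));
         }
      }
   }
}
</source>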

=== Emgu CV 2.x ===
<div class="toccolours mw-collapsible mw-collapsed">
Click to view source code
<div class="mw-collapsible-content">
<source lang="csharp">
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Drawing;
using System.Runtime.InteropServices;
using Emgu.CV;
using Emgu.CV.CvEnum;
using Emgu.CV.Features2D;
using Emgu.CV.Structure;
using Emgu.CV.Util;
using Emgu.CV.GPU;

namespace SURFFeatureExample
{
   public static class DrawMatches
   {
      /// <summary>
      /// Draw the model image and observed image, the matched features and homography projection.
      /// </summary>
      /// <param name="modelImage">The model image</param>
      /// <param name="observedImage">The observed image</param>
      /// <param name="matchTime">The output total time for computing the homography matrix.</param>
      /// <returns>The model image and observed image, the matched features and homography projection.</returns>
      public static Image<Bgr, Byte> Draw(Image<Gray, Byte> modelImage, Image<Gray, byte> observedImage, out long matchTime)
      {
         Stopwatch watch;
         HomographyMatrix homography = null;

         SURFDetector surfCPU = new SURFDetector(500, false);
         VectorOfKeyPoint modelKeyPoints;
         VectorOfKeyPoint observedKeyPoints;
         Matrix<int> indices;

         Matrix<byte> mask;
         int k = 2;
         double uniquenessThreshold = 0.8;
         if (GpuInvoke.HasCuda)
         {
            GpuSURFDetector surfGPU = new GpuSURFDetector(surfCPU.SURFParams, 0.01f);
            using (GpuImage<Gray, Byte> gpuModelImage = new GpuImage<Gray, byte>(modelImage))
            //extract features from the object image
            using (GpuMat<float> gpuModelKeyPoints = surfGPU.DetectKeyPointsRaw(gpuModelImage, null))
            using (GpuMat<float> gpuModelDescriptors = surfGPU.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))
            using (GpuBruteForceMatcher<float> matcher = new GpuBruteForceMatcher<float>(DistanceType.L2))
            {
               modelKeyPoints = new VectorOfKeyPoint();
               surfGPU.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);
               watch = Stopwatch.StartNew();

               // extract features from the observed image
               using (GpuImage<Gray, Byte> gpuObservedImage = new GpuImage<Gray, byte>(observedImage))
               using (GpuMat<float> gpuObservedKeyPoints = surfGPU.DetectKeyPointsRaw(gpuObservedImage, null))
               using (GpuMat<float> gpuObservedDescriptors = surfGPU.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))
               using (GpuMat<int> gpuMatchIndices = new GpuMat<int>(gpuObservedDescriptors.Size.Height, k, 1, true))
               using (GpuMat<float> gpuMatchDist = new GpuMat<float>(gpuObservedDescriptors.Size.Height, k, 1, true))
               using (GpuMat<Byte> gpuMask = new GpuMat<byte>(gpuMatchIndices.Size.Height, 1, 1))
               using (Stream stream = new Stream())
               {
                  matcher.KnnMatchSingle(gpuObservedDescriptors, gpuModelDescriptors, gpuMatchIndices, gpuMatchDist, k, null, stream);
                  indices = new Matrix<int>(gpuMatchIndices.Size);
                  mask = new Matrix<byte>(gpuMask.Size);

                  //gpu implementation of VoteForUniqueness
                  using (GpuMat<float> col0 = gpuMatchDist.Col(0))
                  using (GpuMat<float> col1 = gpuMatchDist.Col(1))
                  {
                     GpuInvoke.Multiply(col1, new MCvScalar(uniquenessThreshold), col1, stream);
                     GpuInvoke.Compare(col0, col1, gpuMask, CMP_TYPE.CV_CMP_LE, stream);
                  }

                  observedKeyPoints = new VectorOfKeyPoint();
                  surfGPU.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);

                  //wait for the stream to complete its tasks
                  //We can perform some other CPU intensive work here while we are waiting for the stream to complete.
                  stream.WaitForCompletion();

                  gpuMask.Download(mask);
                  gpuMatchIndices.Download(indices);

                  if (GpuInvoke.CountNonZero(gpuMask) >= 4)
                  {
                     int nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                     if (nonZeroCount >= 4)
                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 2);
                  }

                  watch.Stop();
               }
            }
         }
         else
         {
            //extract features from the object image
            modelKeyPoints = surfCPU.DetectKeyPointsRaw(modelImage, null);
            Matrix<float> modelDescriptors = surfCPU.ComputeDescriptorsRaw(modelImage, null, modelKeyPoints);

            watch = Stopwatch.StartNew();

            // extract features from the observed image
            observedKeyPoints = surfCPU.DetectKeyPointsRaw(observedImage, null);
            Matrix<float> observedDescriptors = surfCPU.ComputeDescriptorsRaw(observedImage, null, observedKeyPoints);
            BruteForceMatcher<float> matcher = new BruteForceMatcher<float>(DistanceType.L2);
            matcher.Add(modelDescriptors);

            indices = new Matrix<int>(observedDescriptors.Rows, k);
            using (Matrix<float> dist = new Matrix<float>(observedDescriptors.Rows, k))
            {
               matcher.KnnMatch(observedDescriptors, indices, dist, k, null);
               mask = new Matrix<byte>(dist.Rows, 1);
               mask.SetValue(255);
               Features2DToolbox.VoteForUniqueness(dist, uniquenessThreshold, mask);
            }

            int nonZeroCount = CvInvoke.cvCountNonZero(mask);
            if (nonZeroCount >= 4)
            {
               nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
               if (nonZeroCount >= 4)
                  homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 2);
            }

            watch.Stop();
         }

         //Draw the matched keypoints
         Image<Bgr, Byte> result = Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
            indices, new Bgr(255, 255, 255), new Bgr(255, 255, 255), mask, Features2DToolbox.KeypointDrawType.DEFAULT);

         #region draw the projected region on the image
         if (homography != null)
         {  //draw a rectangle along the projected model
            Rectangle rect = modelImage.ROI;
            PointF[] pts = new PointF[] {
               new PointF(rect.Left, rect.Bottom),
               new PointF(rect.Right, rect.Bottom),
               new PointF(rect.Right, rect.Top),
               new PointF(rect.Left, rect.Top)};
            homography.ProjectPoints(pts);

            result.DrawPolyline(Array.ConvertAll<PointF, Point>(pts, Point.Round), true, new Bgr(Color.Red), 5);
         }
         #endregion

         matchTime = watch.ElapsedMilliseconds;

         return result;
      }
   }
}
</source>
</div>
</div>
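As with the 3.x version, a minimal sketch of a caller for the 2.x Draw method above, assuming the sample images box.png and box_in_scene.png and the ImageViewer form from Emgu.CV.UI; adapt it to your own project.
<source lang="csharp">
using System;
using Emgu.CV;
using Emgu.CV.Structure;
using Emgu.CV.UI;

namespace SURFFeatureExample
{
   static class Program
   {
      [STAThread]
      static void Main()
      {
         long matchTime;
         // Load the model (object) image and the observed (scene) image as grayscale images
         Image<Gray, Byte> modelImage = new Image<Gray, byte>("box.png");
         Image<Gray, Byte> observedImage = new Image<Gray, byte>("box_in_scene.png");

         // Compute the matches, draw them and show the annotated result
         Image<Bgr, Byte> result = DrawMatches.Draw(modelImage, observedImage, out matchTime);
         ImageViewer.Show(result, String.Format("Matched in {0} milliseconds", matchTime));
      }
   }
}
</source>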
== Performance Comparison ==
For the desktop results, the component shown in bold performed the matching; the struck-through component was not used in that run.
{| style="text-align:center" border="1px" cellpadding="10" cellspacing="0"
!CPU || GPU || Emgu CV Package || Execution Time (milliseconds)
|-
| <del>Core i7-2630QM @ 2.0 GHz</del> || '''NVidia GeForce GTX 560M''' || libemgucv-windows-x64-2.4.0.1714 || 87
|-
| '''Core i7-2630QM @ 2.0 GHz''' || <del>NVidia GeForce GTX 560M</del> || libemgucv-windows-x64-2.4.0.1714 || 192
|-
| LG G Flex 2 (Android) || || libemgucv-android-3.1.0.2298 || 432
|}


== Result ==
*Windows
[[image:SURFExample.png]]
*Android (Nexus S)
[[File:MonoAndroidSURFFeatureResultNexusS.jpg | 500px]]
