MediaWiki API result

This is the HTML representation of the JSON format. HTML is good for debugging, but is unsuitable for application use.

Specify the format parameter to change the output format. To see the non-HTML representation of the JSON format, set format=json.

See the complete documentation, or the API help for more information.

{
    "batchcomplete": "",
    "continue": {
        "gapcontinue": "SVN",
        "continue": "gapcontinue||"
    },
    "query": {
        "pages": {
            "59": {
                "pageid": 59,
                "ns": 0,
                "title": "SURF feature detector in CSharp",
                "revisions": [
                    {
                        "contentformat": "text/x-wiki",
                        "contentmodel": "wikitext",
                        "*": "<font color=green>'''This project is part of the Emgu.CV.Example solution'''</font>\n\n== System Requirement ==\n{| style=\"text-align:center\" border=\"1px\" cellpadding=\"10\" cellspacing=\"0\"\n!Component || Requirement || Detail \n|-\n|Emgu CV || [[Version_History#Emgu.CV-2.4.0|Version 2.4.0]] + ||  \n|-\n|Operation System || Cross Platform || \n|}\n\n== Source Code ==\n=== Emgu CV 3.x ===\n<div class=\"toccolours mw-collapsible mw-collapsed\">\nClick to view source code\n<div class=\"mw-collapsible-content\">\n<source lang=\"csharp\">\nusing System;\nusing System.Collections.Generic;\nusing System.Diagnostics;\nusing System.Drawing;\nusing System.Runtime.InteropServices;\nusing Emgu.CV;\nusing Emgu.CV.CvEnum;\nusing Emgu.CV.Features2D;\nusing Emgu.CV.Structure;\nusing Emgu.CV.Util;\n#if !__IOS__\nusing Emgu.CV.Cuda;\n#endif\nusing Emgu.CV.XFeatures2D;\n\nnamespace SURFFeatureExample\n{\n   public static class DrawMatches\n   {\n      public static void FindMatch(Mat modelImage, Mat observedImage, out long matchTime, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, VectorOfVectorOfDMatch matches, out Mat mask, out Mat homography)\n      {\n         int k = 2;\n         double uniquenessThreshold = 0.8;\n         double hessianThresh = 300;\n         \n         Stopwatch watch;\n         homography = null;\n\n         modelKeyPoints = new VectorOfKeyPoint();\n         observedKeyPoints = new VectorOfKeyPoint();\n\n         #if !__IOS__\n         if ( CudaInvoke.HasCuda)\n         {\n            CudaSURF surfCuda = new CudaSURF((float) hessianThresh);\n            using (GpuMat gpuModelImage = new GpuMat(modelImage))\n            //extract features from the object image\n            using (GpuMat gpuModelKeyPoints = surfCuda.DetectKeyPointsRaw(gpuModelImage, null))\n            using (GpuMat gpuModelDescriptors = surfCuda.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))\n            using (CudaBFMatcher matcher = new CudaBFMatcher(DistanceType.L2))\n            {\n               surfCuda.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);\n               watch = Stopwatch.StartNew();\n\n               // extract features from the observed image\n               using (GpuMat gpuObservedImage = new GpuMat(observedImage))\n               using (GpuMat gpuObservedKeyPoints = surfCuda.DetectKeyPointsRaw(gpuObservedImage, null))\n               using (GpuMat gpuObservedDescriptors = surfCuda.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))\n               //using (GpuMat tmp = new GpuMat())\n               //using (Stream stream = new Stream())\n               {\n                  matcher.KnnMatch(gpuObservedDescriptors, gpuModelDescriptors, matches, k);\n\n                  surfCuda.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);\n\n                  mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);\n                  mask.SetTo(new MCvScalar(255));\n                  Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);\n\n                  int nonZeroCount = CvInvoke.CountNonZero(mask);\n                  if (nonZeroCount >= 4)\n                  {\n                     nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,\n                        matches, mask, 1.5, 20);\n                     if (nonZeroCount >= 4)\n                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,\n                           observedKeyPoints, matches, mask, 2);\n                  }\n               }\n                  watch.Stop();\n               }\n            }\n         else\n         #endif\n         {\n            using (UMat uModelImage = modelImage.ToUMat(AccessType.Read))\n            using (UMat uObservedImage = observedImage.ToUMat(AccessType.Read))\n            {\n               SURF surfCPU = new SURF(hessianThresh);\n               //extract features from the object image\n               UMat modelDescriptors = new UMat();\n               surfCPU.DetectAndCompute(uModelImage, null, modelKeyPoints, modelDescriptors, false);\n\n               watch = Stopwatch.StartNew();\n\n               // extract features from the observed image\n               UMat observedDescriptors = new UMat();\n               surfCPU.DetectAndCompute(uObservedImage, null, observedKeyPoints, observedDescriptors, false);\n               BFMatcher matcher = new BFMatcher(DistanceType.L2);\n               matcher.Add(modelDescriptors);\n\n               matcher.KnnMatch(observedDescriptors, matches, k, null);\n               mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);\n               mask.SetTo(new MCvScalar(255));\n               Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);\n\n               int nonZeroCount = CvInvoke.CountNonZero(mask);\n               if (nonZeroCount >= 4)\n               {\n                  nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,\n                     matches, mask, 1.5, 20);\n                  if (nonZeroCount >= 4)\n                     homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,\n                        observedKeyPoints, matches, mask, 2);\n               }\n\n               watch.Stop();\n            }\n         }\n         matchTime = watch.ElapsedMilliseconds;\n      }\n\n      /// <summary>\n      /// Draw the model image and observed image, the matched features and homography projection.\n      /// </summary>\n      /// <param name=\"modelImage\">The model image</param>\n      /// <param name=\"observedImage\">The observed image</param>\n      /// <param name=\"matchTime\">The output total time for computing the homography matrix.</param>\n      /// <returns>The model image and observed image, the matched features and homography projection.</returns>\n      public static Mat Draw(Mat modelImage, Mat observedImage, out long matchTime)\n      {\n         Mat homography;\n         VectorOfKeyPoint modelKeyPoints;\n         VectorOfKeyPoint observedKeyPoints;\n         using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())\n         {\n            Mat mask;\n            FindMatch(modelImage, observedImage, out matchTime, out modelKeyPoints, out observedKeyPoints, matches,\n               out mask, out homography);\n\n            //Draw the matched keypoints\n            Mat result = new Mat();\n            Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,\n               matches, result, new MCvScalar(255, 255, 255), new MCvScalar(255, 255, 255), mask);\n\n            #region draw the projected region on the image\n\n            if (homography != null)\n            {\n               //draw a rectangle along the projected model\n               Rectangle rect = new Rectangle(Point.Empty, modelImage.Size);\n               PointF[] pts = new PointF[]\n               {\n                  new PointF(rect.Left, rect.Bottom),\n                  new PointF(rect.Right, rect.Bottom),\n                  new PointF(rect.Right, rect.Top),\n                  new PointF(rect.Left, rect.Top)\n               };\n               pts = CvInvoke.PerspectiveTransform(pts, homography);\n\n               Point[] points = Array.ConvertAll<PointF, Point>(pts, Point.Round);\n               using (VectorOfPoint vp = new VectorOfPoint(points))\n               {\n                  CvInvoke.Polylines(result, vp, true, new MCvScalar(255, 0, 0, 255), 5);\n               }\n               \n            }\n\n            #endregion\n\n            return result;\n\n         }\n      }\n   }\n}\n\n</source>\n</div>\n</div>\n\n=== Emgu CV 2.x ===\n<div class=\"toccolours mw-collapsible mw-collapsed\">\nClick to view source code\n<div class=\"mw-collapsible-content\">\n<source lang=\"csharp\">\nusing System;\nusing System.Collections.Generic;\nusing System.Diagnostics;\nusing System.Drawing;\nusing System.Runtime.InteropServices;\nusing Emgu.CV;\nusing Emgu.CV.CvEnum;\nusing Emgu.CV.Features2D;\nusing Emgu.CV.Structure;\nusing Emgu.CV.Util;\nusing Emgu.CV.GPU;\n\nnamespace SURFFeatureExample\n{\n   public static class DrawMatches\n   {\n      /// <summary>\n      /// Draw the model image and observed image, the matched features and homography projection.\n      /// </summary>\n      /// <param name=\"modelImage\">The model image</param>\n      /// <param name=\"observedImage\">The observed image</param>\n      /// <param name=\"matchTime\">The output total time for computing the homography matrix.</param>\n      /// <returns>The model image and observed image, the matched features and homography projection.</returns>\n      public static Image<Bgr, Byte> Draw(Image<Gray, Byte> modelImage, Image<Gray, byte> observedImage, out long matchTime)\n      {\n         Stopwatch watch;\n         HomographyMatrix homography = null;\n\n         SURFDetector surfCPU = new SURFDetector(500, false);\n         VectorOfKeyPoint modelKeyPoints;\n         VectorOfKeyPoint observedKeyPoints;\n         Matrix<int> indices;\n\n         Matrix<byte> mask;\n         int k = 2;\n         double uniquenessThreshold = 0.8;\n         if (GpuInvoke.HasCuda)\n         {\n            GpuSURFDetector surfGPU = new GpuSURFDetector(surfCPU.SURFParams, 0.01f);\n            using (GpuImage<Gray, Byte> gpuModelImage = new GpuImage<Gray, byte>(modelImage))\n            //extract features from the object image\n            using (GpuMat<float> gpuModelKeyPoints = surfGPU.DetectKeyPointsRaw(gpuModelImage, null))\n            using (GpuMat<float> gpuModelDescriptors = surfGPU.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))\n            using (GpuBruteForceMatcher<float> matcher = new GpuBruteForceMatcher<float>(DistanceType.L2))\n            {\n               modelKeyPoints = new VectorOfKeyPoint();\n               surfGPU.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);\n               watch = Stopwatch.StartNew();\n\n               // extract features from the observed image\n               using (GpuImage<Gray, Byte> gpuObservedImage = new GpuImage<Gray, byte>(observedImage))\n               using (GpuMat<float> gpuObservedKeyPoints = surfGPU.DetectKeyPointsRaw(gpuObservedImage, null))\n               using (GpuMat<float> gpuObservedDescriptors = surfGPU.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))\n               using (GpuMat<int> gpuMatchIndices = new GpuMat<int>(gpuObservedDescriptors.Size.Height, k, 1, true))\n               using (GpuMat<float> gpuMatchDist = new GpuMat<float>(gpuObservedDescriptors.Size.Height, k, 1, true))\n               using (GpuMat<Byte> gpuMask = new GpuMat<byte>(gpuMatchIndices.Size.Height, 1, 1))\n               using (Stream stream = new Stream())\n               {\n                  matcher.KnnMatchSingle(gpuObservedDescriptors, gpuModelDescriptors, gpuMatchIndices, gpuMatchDist, k, null, stream);\n                  indices = new Matrix<int>(gpuMatchIndices.Size);\n                  mask = new Matrix<byte>(gpuMask.Size);\n\n                  //gpu implementation of voteForUniquess\n                  using (GpuMat<float> col0 = gpuMatchDist.Col(0))\n                  using (GpuMat<float> col1 = gpuMatchDist.Col(1))\n                  {\n                     GpuInvoke.Multiply(col1, new MCvScalar(uniquenessThreshold), col1, stream);\n                     GpuInvoke.Compare(col0, col1, gpuMask, CMP_TYPE.CV_CMP_LE, stream);\n                  }\n\n                  observedKeyPoints = new VectorOfKeyPoint();\n                  surfGPU.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);\n\n                  //wait for the stream to complete its tasks\n                  //We can perform some other CPU intesive stuffs here while we are waiting for the stream to complete.\n                  stream.WaitForCompletion();\n\n                  gpuMask.Download(mask);\n                  gpuMatchIndices.Download(indices);\n\n                  if (GpuInvoke.CountNonZero(gpuMask) >= 4)\n                  {\n                     int nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);\n                     if (nonZeroCount >= 4)\n                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 2);\n                  }\n\n                  watch.Stop();\n               }\n            }\n         } else\n         {\n            //extract features from the object image\n            modelKeyPoints = surfCPU.DetectKeyPointsRaw(modelImage, null);\n            Matrix<float> modelDescriptors = surfCPU.ComputeDescriptorsRaw(modelImage, null, modelKeyPoints);\n\n            watch = Stopwatch.StartNew();\n\n            // extract features from the observed image\n            observedKeyPoints = surfCPU.DetectKeyPointsRaw(observedImage, null);\n            Matrix<float> observedDescriptors = surfCPU.ComputeDescriptorsRaw(observedImage, null, observedKeyPoints);\n            BruteForceMatcher<float> matcher = new BruteForceMatcher<float>(DistanceType.L2);\n            matcher.Add(modelDescriptors);\n\n            indices = new Matrix<int>(observedDescriptors.Rows, k);\n            using (Matrix<float> dist = new Matrix<float>(observedDescriptors.Rows, k))\n            {\n               matcher.KnnMatch(observedDescriptors, indices, dist, k, null);\n               mask = new Matrix<byte>(dist.Rows, 1);\n               mask.SetValue(255);\n               Features2DToolbox.VoteForUniqueness(dist, uniquenessThreshold, mask);\n            }\n\n            int nonZeroCount = CvInvoke.cvCountNonZero(mask);\n            if (nonZeroCount >= 4)\n            {\n               nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);\n               if (nonZeroCount >= 4)\n                  homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 2);\n            }\n\n            watch.Stop();\n         }\n\n         //Draw the matched keypoints\n         Image<Bgr, Byte> result = Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,\n            indices, new Bgr(255, 255, 255), new Bgr(255, 255, 255), mask, Features2DToolbox.KeypointDrawType.DEFAULT);\n\n         #region draw the projected region on the image\n         if (homography != null)\n         {  //draw a rectangle along the projected model\n            Rectangle rect = modelImage.ROI;\n            PointF[] pts = new PointF[] { \n               new PointF(rect.Left, rect.Bottom),\n               new PointF(rect.Right, rect.Bottom),\n               new PointF(rect.Right, rect.Top),\n               new PointF(rect.Left, rect.Top)};\n            homography.ProjectPoints(pts);\n\n            result.DrawPolyline(Array.ConvertAll<PointF, Point>(pts, Point.Round), true, new Bgr(Color.Red), 5);\n         }\n         #endregion\n\n         matchTime = watch.ElapsedMilliseconds;\n\n         return result;\n      }\n   }\n}\n</source>\n</div>\n</div>\n== Performance Comparison ==\n{| style=\"text-align:center\" border=\"1px\" cellpadding=\"10\" cellspacing=\"0\"\n!CPU|| GPU || Emgu CV Package || Execution Time (millisecond)\n|-\n| <del>Core i7-2630QM@2.0Ghz</del> || '''NVidia GeForce GTX560M''' || libemgucv-windows-x64-2.4.0.1714 || 87\n|-\n| '''Core i7-2630QM@2.0Ghz''' || <del>NVidia GeForce GTX560M</del> || libemgucv-windows-x64-2.4.0.1714 || 192\n|-\n| LG G Flex 2 (Android) || || libemgucv-android-3.1.0.2298 || 432\n|}\n\n== Result ==\n*Windows\n\n[[image:SURFExample.png]]\n\n*Android (Nexus S)\n[[File:MonoAndroidSURFFeatureResultNexusS.jpg | 500px]]"
                    }
                ]
            },
            "79": {
                "pageid": 79,
                "ns": 0,
                "title": "SVM (Support Vector Machine) in CSharp",
                "revisions": [
                    {
                        "contentformat": "text/x-wiki",
                        "contentmodel": "wikitext",
                        "*": "'''The source code of this example is contributed by Albert G. It requires [[Version_History#Emgu.CV-1.5.0.0|Emgu CV 1.5.0.0]]'''\n\n== What is a Support Vector Machine ==\nAccording to [http://en.wikipedia.org/wiki/Support_vector_machine wikipedia],\n\n: ''Support vector machines (SVMs) are a set of related supervised learning methods used for classification and regression. Viewing input data as two sets of vectors in an n-dimensional space, an SVM will construct a separating hyperplane in that space, one which maximizes the margin between the two data sets. To calculate the margin, two parallel hyperplanes are constructed, one on each side of the separating hyperplane, which are \"pushed up against\" the two data sets. Intuitively, a good separation is achieved by the hyperplane that has the largest distance to the neighboring datapoints of both classes, since in general the larger the margin the lower the generalization error of the classifier.''\n\n== Source Code ==\n<source lang=\"csharp\">\nusing System.Drawing;\nusing Emgu.CV.Structure;\nusing Emgu.CV.ML;\nusing Emgu.CV.ML.Structure;\n\n...\n\nint trainSampleCount = 150;\nint sigma = 60;\n\n#region Generate the training data and classes\n\nMatrix<float> trainData = new Matrix<float>(trainSampleCount, 2);\nMatrix<float> trainClasses = new Matrix<float>(trainSampleCount, 1);\n\nImage<Bgr, Byte> img = new Image<Bgr, byte>(500, 500);\n\nMatrix<float> sample = new Matrix<float>(1, 2);\n\nMatrix<float> trainData1 = trainData.GetRows(0, trainSampleCount / 3, 1);\ntrainData1.GetCols(0, 1).SetRandNormal(new MCvScalar(100), new MCvScalar(sigma));\ntrainData1.GetCols(1, 2).SetRandNormal(new MCvScalar(300), new MCvScalar(sigma));\n\nMatrix<float> trainData2 = trainData.GetRows(trainSampleCount / 3, 2 * trainSampleCount / 3, 1);\ntrainData2.SetRandNormal(new MCvScalar(400), new MCvScalar(sigma));\n\nMatrix<float> trainData3 = trainData.GetRows(2 * trainSampleCount / 3, trainSampleCount, 1);\ntrainData3.GetCols(0, 1).SetRandNormal(new MCvScalar(300), new MCvScalar(sigma));\ntrainData3.GetCols(1, 2).SetRandNormal(new MCvScalar(100), new MCvScalar(sigma));\n\nMatrix<float> trainClasses1 = trainClasses.GetRows(0, trainSampleCount / 3, 1);\ntrainClasses1.SetValue(1);\nMatrix<float> trainClasses2 = trainClasses.GetRows(trainSampleCount / 3, 2 * trainSampleCount / 3, 1);\ntrainClasses2.SetValue(2);\nMatrix<float> trainClasses3 = trainClasses.GetRows(2 * trainSampleCount / 3, trainSampleCount, 1);\ntrainClasses3.SetValue(3);\n\n#endregion\n\nusing (SVM model = new SVM())\n{\n   SVMParams p = new SVMParams();\n   p.KernelType = Emgu.CV.ML.MlEnum.SVM_KERNEL_TYPE.LINEAR;\n   p.SVMType = Emgu.CV.ML.MlEnum.SVM_TYPE.C_SVC;\n   p.C = 1;\n   p.TermCrit = new MCvTermCriteria(100, 0.00001);\n\n   //bool trained = model.Train(trainData, trainClasses, null, null, p);\n   bool trained = model.TrainAuto(trainData, trainClasses, null, null, p.MCvSVMParams, 5);\n   \n   for (int i = 0; i < img.Height; i++)\n   {\n      for (int j = 0; j < img.Width; j++)\n      {\n         sample.Data[0, 0] = j;\n         sample.Data[0, 1] = i;\n\n         float response = model.Predict(sample);\n\n         img[i, j] =\n            response == 1 ? new Bgr(90, 0, 0) :\n            response == 2 ? new Bgr(0, 90, 0) :\n            new Bgr(0, 0, 90);\n      }\n   }\n\n   int c = model.GetSupportVectorCount();\n   for (int i = 0; i < c; i++)\n   {\n      float[] v = model.GetSupportVector(i);\n      PointF p1 = new PointF(v[0], v[1]);\n      img.Draw(new CircleF(p1, 4), new Bgr(128, 128, 128), 2);\n   }\n}\n\n// display the original training samples\nfor (int i = 0; i < (trainSampleCount / 3); i++)\n{\n   PointF p1 = new PointF(trainData1[i, 0], trainData1[i, 1]);\n   img.Draw(new CircleF(p1, 2.0f), new Bgr(255, 100, 100), -1);\n   PointF p2 = new PointF(trainData2[i, 0], trainData2[i, 1]);\n   img.Draw(new CircleF(p2, 2.0f), new Bgr(100, 255, 100), -1);\n   PointF p3 = new PointF(trainData3[i, 0], trainData3[i, 1]);\n   img.Draw(new CircleF(p3, 2.0f), new Bgr(100, 100, 255), -1);\n}\n\nEmgu.CV.UI.ImageViewer.Show(img);\n</source>\n\n== Result ==\n[[image:SVM.png]]"
                    }
                ]
            }
        }
    }
}