OpenCV3 On ARM

Source Code Dir

CMake-GUI Configure

Configure/Generate

Unix Makefiles
Specify options for cross-compiling

Choose ARM – LINUX

Operating System : arm-linux
C Compilers :
C++ Compilers :
Target Root : The Cross Compiler BIN Directory

CMAKE_INSTALL_PREFIX

make & make install

  1. make
  2. make install

You may also need the following:


<>

The END

  1. tar jcvf opencv-arm.tar.bz2 opencv-arm/

Lucas-Kanade & Farneback

LucasKanade Tracker

  1.         // Check if there are points to track
  2.         if(!trackingPoints[0].empty())
  3.         {
  4.             // Status vector to indicate whether the flow for the corresponding features has been found
  5.             vector<uchar> statusVector;
  6.  
  7.             // Error vector to indicate the error for the corresponding feature
  8.             vector<float> errorVector;
  9.  
  10.             // Check if previous image is empty
  11.             if(prevGrayImage.empty())
  12.             {
  13.                 curGrayImage.copyTo(prevGrayImage);
  14.             }
  15.  
  16.             // Calculate the optical flow using Lucas-Kanade algorithm
  17.             calcOpticalFlowPyrLK(prevGrayImage, curGrayImage, trackingPoints[0], trackingPoints[1], statusVector, errorVector, windowSize, 3, terminationCriteria, 0, 0.001);
  18.  
  19.             int count = 0;
  20.  
  21.             // Minimum distance between any two tracking points
  22.             int minDist = 7;
  23.  
  24.             for(int i=0; i < trackingPoints[1].size(); i++)
  25.             {
  26.                 if(pointTrackingFlag)
  27.                 {
  28.                     // If the new point is within 'minDist' distance from an existing point, it will not be tracked
  29.                     if(norm(currentPoint - trackingPoints[1][i]) <= minDist)
  30.                     {
  31.                         pointTrackingFlag = false;
  32.                         continue;
  33.                     }
  34.                 }
  35.  
  36.                 // Check if the status vector is good
  37.                 if(!statusVector[i])
  38.                     continue;
  39.  
  40.                 trackingPoints[1][count++] = trackingPoints[1][i];
  41.  
  42.                 // Draw a filled circle for each of the tracking points
  43.                 int radius = 8;
  44.                 int thickness = 2;
  45.                 int lineType = 8;
  46.                 circle(image, trackingPoints[1][i], radius, Scalar(0,255,0), thickness, lineType);
  47.             }
  48.  
  49.             trackingPoints[1].resize(count);
  50.         }
  51.  
  52.         // Refining the location of the feature points
  53.         if(pointTrackingFlag && trackingPoints[1].size() < maxNumPoints)
  54.         {
  55.             vector<Point2f> tempPoints;
  56.             tempPoints.push_back(currentPoint);
  57.  
  58.             // Function to refine the location of the corners to subpixel accuracy.
  59.             // Here, 'pixel' refers to the image patch of size 'windowSize' and not the actual image pixel
  60.             cornerSubPix(curGrayImage, tempPoints, windowSize, cvSize(-1,-1), terminationCriteria);
  61.  
  62.             trackingPoints[1].push_back(tempPoints[0]);
  63.             pointTrackingFlag = false;
  64.         }

Farneback Tracker

  1.         // Check if the image is valid
  2.         if(prevGray.data)
  3.         {
  4.             // Initialize parameters for the optical flow algorithm
  5.             float pyrScale = 0.5;
  6.             int numLevels = 3;
  7.             int windowSize = 15;
  8.             int numIterations = 3;
  9.             int neighborhoodSize = 5;
  10.             float stdDeviation = 1.2;
  11.  
  12.             // Calculate optical flow map using Farneback algorithm
  13.             calcOpticalFlowFarneback(prevGray, curGray, flowImage, pyrScale, numLevels, windowSize, numIterations, neighborhoodSize, stdDeviation, OPTFLOW_USE_INITIAL_FLOW);
  14.  
  15.             // Convert to 3-channel RGB
  16.             cvtColor(prevGray, flowImageGray, COLOR_GRAY2BGR);
  17.  
  18.             // Draw the optical flow map
  19.             drawOpticalFlow(flowImage, flowImageGray);
  20.  
  21.             // Display the output image
  22.             imshow(windowName, flowImageGray);
  23.         }

Other Morphological Operators

Opening

  1. Mat performOpening(Mat inputImage, int morphologyElement, int morphologySize)
  2. {
  3.     Mat outputImage, tempImage;
  4.     int morphologyType;
  5.  
  6.     if(morphologyElement == 0)
  7.         morphologyType = MORPH_RECT;
  8.  
  9.     else if(morphologyElement == 1)
  10.         morphologyType = MORPH_CROSS;
  11.  
  12.     else if(morphologyElement == 2)
  13.         morphologyType = MORPH_ELLIPSE;
  14.  
  15.     // Create the structuring element for erosion
  16.     Mat element = getStructuringElement(morphologyType, Size(2*morphologySize + 1, 2*morphologySize + 1), Point(morphologySize, morphologySize));
  17.  
  18.     // Apply morphological opening to the image using the structuring element
  19.     erode(inputImage, tempImage, element);
  20.     dilate(tempImage, outputImage, element);
  21.  
  22.     // Return the output image
  23.     return outputImage;
  24. }

Closing

  1. Mat performClosing(Mat inputImage, int morphologyElement, int morphologySize)
  2. {
  3.     Mat outputImage, tempImage;
  4.     int morphologyType;
  5.  
  6.     if(morphologyElement == 0)
  7.         morphologyType = MORPH_RECT;
  8.  
  9.     else if(morphologyElement == 1)
  10.         morphologyType = MORPH_CROSS;
  11.  
  12.     else if(morphologyElement == 2)
  13.         morphologyType = MORPH_ELLIPSE;
  14.  
  15.     // Create the structuring element for erosion
  16.     Mat element = getStructuringElement(morphologyType, Size(2*morphologySize + 1, 2*morphologySize + 1), Point(morphologySize, morphologySize));
  17.  
  18.     // Apply morphological opening to the image using the structuring element
  19.     dilate(inputImage, tempImage, element);
  20.     erode(tempImage, outputImage, element);
  21.  
  22.     // Return the output image
  23.     return outputImage;
  24. }

Morphological Gradient

  1. Mat performMorphologicalGradient(Mat inputImage, int morphologyElement, int morphologySize)
  2. {
  3.     Mat outputImage, tempImage1, tempImage2;
  4.     int morphologyType;
  5.  
  6.     if(morphologyElement == 0)
  7.         morphologyType = MORPH_RECT;
  8.  
  9.     else if(morphologyElement == 1)
  10.         morphologyType = MORPH_CROSS;
  11.  
  12.     else if(morphologyElement == 2)
  13.         morphologyType = MORPH_ELLIPSE;
  14.  
  15.     // Create the structuring element for erosion
  16.     Mat element = getStructuringElement(morphologyType, Size(2*morphologySize + 1, 2*morphologySize + 1), Point(morphologySize, morphologySize));
  17.  
  18.     // Apply morphological gradient to the image using the structuring element
  19.     dilate(inputImage, tempImage1, element);
  20.     erode(inputImage, tempImage2, element);
  21.  
  22.     // Return the output image
  23.     return tempImage1 - tempImage2;
  24. }

TopHat

  1. Mat performTopHat(Mat inputImage, int morphologyElement, int morphologySize)
  2. {
  3.     Mat outputImage;
  4.     int morphologyType;
  5.  
  6.     if(morphologyElement == 0)
  7.         morphologyType = MORPH_RECT;
  8.  
  9.     else if(morphologyElement == 1)
  10.         morphologyType = MORPH_CROSS;
  11.  
  12.     else if(morphologyElement == 2)
  13.         morphologyType = MORPH_ELLIPSE;
  14.  
  15.     // Create the structuring element for erosion
  16.     Mat element = getStructuringElement(morphologyType, Size(2*morphologySize + 1, 2*morphologySize + 1), Point(morphologySize, morphologySize));
  17.  
  18.     // Apply top hat operation to the image using the structuring element
  19.     outputImage = inputImage - performOpening(inputImage, morphologyElement, morphologySize);
  20.  
  21.     // Return the output image
  22.     return outputImage;
  23. }

BlackHat

  1. Mat performBlackHat(Mat inputImage, int morphologyElement, int morphologySize)
  2. {
  3.     Mat outputImage;
  4.     int morphologyType;
  5.  
  6.     if(morphologyElement == 0)
  7.         morphologyType = MORPH_RECT;
  8.  
  9.     else if(morphologyElement == 1)
  10.         morphologyType = MORPH_CROSS;
  11.  
  12.     else if(morphologyElement == 2)
  13.         morphologyType = MORPH_ELLIPSE;
  14.  
  15.     // Create the structuring element for erosion
  16.     Mat element = getStructuringElement(morphologyType, Size(2*morphologySize + 1, 2*morphologySize + 1), Point(morphologySize, morphologySize));
  17.  
  18.     // Apply black hat operation to the image using the structuring element
  19.     outputImage = performClosing(inputImage, morphologyElement, morphologySize) - inputImage;
  20.  
  21.     // Return the output image
  22.     return outputImage;
  23. }

Slimming/Thickening The Shapes

Erosion

  1. Mat performErosion(Mat inputImage, int erosionElement, int erosionSize)
  2. {
  3.     Mat outputImage;
  4.     int erosionType;
  5.  
  6.     if(erosionElement == 0)
  7.         erosionType = MORPH_RECT;
  8.  
  9.     else if(erosionElement == 1)
  10.         erosionType = MORPH_CROSS;
  11.  
  12.     else if(erosionElement == 2)
  13.         erosionType = MORPH_ELLIPSE;
  14.  
  15.     // Create the structuring element for erosion
  16.     Mat element = getStructuringElement(erosionType, Size(2*erosionSize + 1, 2*erosionSize + 1), Point(erosionSize, erosionSize));
  17.  
  18.     // Erode the image using the structuring element
  19.     erode(inputImage, outputImage, element);
  20.  
  21.     // Return the output image
  22.     return outputImage;
  23. }

Dilation

  1. Mat performDilation(Mat inputImage, int dilationElement, int dilationSize)
  2. {
  3.     Mat outputImage;
  4.     int dilationType;
  5.  
  6.     if(dilationElement == 0)
  7.         dilationType = MORPH_RECT;
  8.  
  9.     else if(dilationElement == 1)
  10.         dilationType = MORPH_CROSS;
  11.  
  12.     else if(dilationElement == 2)
  13.         dilationType = MORPH_ELLIPSE;
  14.  
  15.     // Create the structuring element for dilation
  16.     Mat element = getStructuringElement(dilationType, Size(2*dilationSize + 1, 2*dilationSize + 1), Point(dilationSize, dilationSize));
  17.  
  18.     // Dilate the image using the structuring element
  19.     dilate(inputImage, outputImage, element);
  20.  
  21.     // Return the output image
  22.     return outputImage;
  23. }

Background Subtraction VS Frame Differencing

Background Subtraction

  1.     // Create MOG Background Subtractor object
  2.     pMOG= cv::bgsegm::createBackgroundSubtractorMOG();//new BackgroundSubtractorMOG();
  3.  
  4.     // Create MOG2 Background Subtractor object
  5.     pMOG2 = createBackgroundSubtractorMOG2(20, 16, true);//new BackgroundSubtractorMOG2();
  6.  
  7.         // Capture the current frame
  8.         cap >> frame;
  9.  
  10.         // Resize the frame
  11.         resize(frame, frame, Size(), scalingFactor, scalingFactor, INTER_AREA);
  12.  
  13.         // Update the MOG background model based on the current frame
  14.         pMOG->apply(frame, fgMaskMOG);
  15.  
  16.         // Update the MOG2 background model based on the current frame
  17.         pMOG2->apply(frame, fgMaskMOG2);
  18.  
  19.         // Show the current frame
  20.         //imshow("Frame", frame);
  21.  
  22.         // Show the MOG foreground mask
  23.         //imshow("FG Mask MOG", fgMaskMOG);
  24.  
  25.         // Show the MOG2 foreground mask
  26.         imshow("FG Mask MOG 2", fgMaskMOG2);

Frame Differencing

  1. Mat frameDiff(Mat prevFrame, Mat curFrame, Mat nextFrame)
  2. {
  3.     Mat diffFrames1, diffFrames2, output;
  4.  
  5.     // Compute absolute difference between current frame and the next frame
  6.     absdiff(nextFrame, curFrame, diffFrames1);
  7.  
  8.     // Compute absolute difference between current frame and the previous frame
  9.     absdiff(curFrame, prevFrame, diffFrames2);
  10.  
  11.     // Bitwise "AND" operation between the above two diff images
  12.     bitwise_and(diffFrames1, diffFrames2, output);
  13.  
  14.     return output;
  15. }

Tracking Your Ears

Tracking Your Ears (Two Ears, \(^o^)/~)

  1.         // Capture the current frame
  2.         cap >> frame;
  3.  
  4.         // Resize the frame
  5.         resize(frame, frame, Size(), scalingFactor, scalingFactor, INTER_AREA);
  6.  
  7.         // Convert to grayscale
  8.         cvtColor(frame, frameGray, CV_BGR2GRAY);
  9.  
  10.         // Equalize the histogram
  11.         equalizeHist(frameGray, frameGray);
  12.  
  13.         // Detect left ear
  14.         leftEarCascade.detectMultiScale(frameGray, leftEars, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );
  15.  
  16.         // Detect right ear
  17.         rightEarCascade.detectMultiScale(frameGray, rightEars, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );
  18.  
  19.         // Draw green rectangle around the left ear
  20.         for(int i = 0; i < leftEars.size(); i++)
  21.         {
  22.             Rect leftEarRect(leftEars[i].x, leftEars[i].y, leftEars[i].width, leftEars[i].height);
  23.             rectangle(frame, leftEarRect, Scalar(0,255,0), 4);
  24.         }
  25.  
  26.         // Draw green rectangle around the right ear
  27.         for(int i = 0; i < rightEars.size(); i++)
  28.         {
  29.             Rect rightEarRect(rightEars[i].x, rightEars[i].y, rightEars[i].width, rightEars[i].height);
  30.             rectangle(frame, rightEarRect, Scalar(0,255,0), 4);
  31.         }

Overlaying A Facemask

Detect Faces

  1.         // Capture the current frame
  2.         cap >> frame;
  3.  
  4.         // Resize the frame
  5.         resize(frame, frame, Size(), scalingFactor, scalingFactor, INTER_AREA);
  6.  
  7.         // Convert to grayscale
  8.         cvtColor(frame, frameGray, CV_BGR2GRAY);
  9.  
  10.         // Equalize the histogram
  11.         equalizeHist(frameGray, frameGray);
  12.  
  13.         // Detect faces
  14.         faceCascade.detectMultiScale(frameGray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );

Add Mask Image

  1.         // Draw green rectangle around the face
  2.         for(int i = 0; i < faces.size(); i++)
  3.         {
  4.             //Rect faceRect(faces[i].x, faces[i].y, faces[i].width, faces[i].height);
  5.  
  6.             // Custom parameters to make the mask fit your face. You may have to play around with them to make sure it works.
  7.             int x = faces[i].x - int(0.1*faces[i].width);
  8.             int y = faces[i].y - int(0.1*faces[i].height); // 0.0*faces[i].height
  9.             int w = int(1.1 * faces[i].width);
  10.             int h = int(1.3 * faces[i].height);
  11.  
  12.             // Extract region of interest (ROI) covering your face
  13.             frameROI = frame(Rect(x,y,w,h));
  14.  
  15.             // Resize the face mask image based on the dimensions of the above ROI
  16.             resize(faceMask, faceMaskSmall, Size(w,h));
  17.  
  18.             // Convert the above image to grayscale
  19.             cvtColor(faceMaskSmall, grayMaskSmall, CV_BGR2GRAY);
  20.  
  21.             // Threshold the above image to isolate the pixels associated only with the face mask
  22.             threshold(grayMaskSmall, grayMaskSmallThresh, 230, 255, CV_THRESH_BINARY_INV);
  23.  
  24.             // Create mask by inverting the above image (because we don't want the background to affect the overlay)
  25.             bitwise_not(grayMaskSmallThresh, grayMaskSmallThreshInv);
  26.  
  27.             // Use bitwise "AND" operator to extract precise boundary of face mask
  28.             bitwise_and(faceMaskSmall, faceMaskSmall, maskedFace, grayMaskSmallThresh);
  29.  
  30.             // Use bitwise "AND" operator to overlay face mask
  31.             bitwise_and(frameROI, frameROI, maskedFrame, grayMaskSmallThreshInv);
  32.  
  33.             // Add the above masked images and place it in the original frame ROI to create the final image
  34.             add(maskedFace, maskedFrame, frame(Rect(x,y,w,h)));
  35.         }

Noise removal & Lighting removal & Binarization

Remove Noise

  1.   Mat img_noise, img_box_smooth;
  2.   medianBlur(img, img_noise, 3);
  3.   blur(img, img_box_smooth, Size(3,3));

Remove Light

  1.   // Load image to process
  2.   Mat light_pattern= imread(light_pattern_file, 0);
  3.   if(light_pattern.data==NULL){
  4.     // Calculate light pattern
  5.     light_pattern= calculateLightPattern(img_noise);
  6.   }
  7.   medianBlur(light_pattern, light_pattern, 3);
  8.  
  9.   //Apply the light pattern
  10.   Mat img_no_light;
  11.   img_noise.copyTo(img_no_light);	
  12.   if(method_light!=2){
  13.   	img_no_light= removeLight(img_noise, light_pattern, method_light);	
  14.   }

Binarize

  1.   // Binarize image for segment
  2.   Mat img_thr;
  3.   if(method_light!=2){
  4.   	threshold(img_no_light, img_thr, 30, 255, THRESH_BINARY);
  5.   }else{
  6.   	threshold(img_no_light, img_thr, 140, 255, THRESH_BINARY_INV);
  7.   }

Segmenting the image (connected components & the findContours function)

Connected Components

  1. void ConnectedComponents(Mat img)
  2. {
  3.   // Use connected components to divide our possibles parts of images 
  4.   Mat labels;
  5.   int num_objects= connectedComponents(img, labels);
  6.   // Check the number of objects detected
  7.   if(num_objects < 2 ){
  8.     cout << "No objects detected" << endl;
  9.     return;
  10.   }else{
  11.     cout << "Number of objects detected: " << num_objects - 1 << endl;
  12.   }
  13.   // Create output image coloring the objects
  14.   Mat output= Mat::zeros(img.rows,img.cols, CV_8UC3);
  15.   RNG rng( 0xFFFFFFFF );
  16.   for(int i=1; i<num_objects; i++){
  17.     Mat mask= labels==i;
  18.     output.setTo(randomColor(rng), mask);
  19.   }
  20.   imshow("Result", output);
  21.   miw->addImage("Result", output);
  22. }
  23.  
  24. void ConnectedComponentsStats(Mat img)
  25. {
  26.   // Use connected components with stats
  27.   Mat labels, stats, centroids;
  28.   int num_objects= connectedComponentsWithStats(img, labels, stats, centroids);
  29.   // Check the number of objects detected
  30.   if(num_objects < 2 ){
  31.     cout << "No objects detected" << endl;
  32.     return;
  33.   }else{
  34.     cout << "Number of objects detected: " << num_objects - 1 << endl;
  35.   }
  36.   // Create output image coloring the objects and show area
  37.   Mat output= Mat::zeros(img.rows,img.cols, CV_8UC3);
  38.   RNG rng( 0xFFFFFFFF );
  39.   for(int i=1; i<num_objects; i++){
  40.     cout << "Object "<< i << " with pos: " << centroids.at<Point2d>(i) << " with area " << stats.at<int>(i, CC_STAT_AREA) << endl;
  41.     Mat mask= labels==i;
  42.     output.setTo(randomColor(rng), mask);
  43.     // draw text with area
  44.     stringstream ss;
  45.     ss << "area: " << stats.at<int>(i, CC_STAT_AREA);
  46.  
  47.     putText(output, 
  48.       ss.str(), 
  49.       centroids.at<Point2d>(i), 
  50.       FONT_HERSHEY_SIMPLEX, 
  51.       0.4, 
  52.       Scalar(255,255,255));
  53.   }
  54.   imshow("Result", output);
  55.   miw->addImage("Result", output);
  56. }

Find Contours Basic

  1. void FindContoursBasic(Mat img)
  2. {
  3.   vector<vector<Point> > contours;
  4.   findContours(img, contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
  5.   Mat output= Mat::zeros(img.rows,img.cols, CV_8UC3);
  6.   // Check the number of objects detected
  7.   if(contours.size() == 0 ){
  8.     cout << "No objects detected" << endl;
  9.     return;
  10.   }else{
  11.     cout << "Number of objects detected: " << contours.size() << endl;
  12.   }
  13.   RNG rng( 0xFFFFFFFF );
  14.   for(int i=0; i<contours.size(); i++)
  15.     drawContours(output, contours, i, randomColor(rng));
  16.   imshow("Result", output);
  17.   miw->addImage("Result", output);
  18. }

The Scene Text Detection & Extraction & Recognition

Separate Channels

  1. vector<Mat> separateChannels(Mat& src)
  2. {
  3. 	vector<Mat> channels;
  4. 	//Grayscale images
  5. 	if (src.type() == CV_8U || src.type() == CV_8UC1) {
  6. 		channels.push_back(src);
  7. 		channels.push_back(255-src);
  8. 		return channels;
  9. 	}
  10.  
  11. 	//Colored images
  12. 	if (src.type() == CV_8UC3) {
  13. 		computeNMChannels(src, channels);
  14. 		int size = static_cast<int>(channels.size())-1;
  15. 		for (int c = 0; c < size; c++)
  16. 			channels.push_back(255-channels[c]);
  17. 		return channels;
  18. 	}
  19.  
  20. 	//Other types
  21. 	cout << "Invalid image format!" << endl;
  22. 	exit(-1);
  23. }

Detection & Draw Groups Boxes

  1.     //Convert the input image to grayscale.
  2.     //Just do Mat processed = input; to work with colors.
  3.     Mat processed;
  4.     cvtColor(input, processed, CV_RGB2GRAY);
  5.  
  6.     auto channels = separateChannels(processed);
  7.  
  8.     // Create ERFilter objects with the 1st and 2nd stage classifiers
  9.     auto filter1 = createERFilterNM1(loadClassifierNM1("trained_classifierNM1.xml"),15,0.00015f,0.13f,0.2f,true,0.1f);
  10.     auto filter2 = createERFilterNM2(loadClassifierNM2("trained_classifierNM2.xml"),0.5);
  11.  
  12.     //Extract text regions using the Neumann & Matas algorithm
  13.     cout << "Processing " << channels.size() << " channels..." << endl;
  14.     vector<vector<ERStat> > regions(channels.size());
  15.     for (int c=0; c < channels.size(); c++)
  16.     {
  17.         cout << "    Channel " << (c+1) << endl;
  18.         filter1->run(channels[c], regions[c]);
  19.         filter2->run(channels[c], regions[c]);
  20.     }
  21.     filter1.release();
  22.     filter2.release();
  23.  
  24.     //Separate character groups from regions
  25.     vector< vector<Vec2i> > groups;
  26.     vector<Rect> groupRects;
  27.     erGrouping(input, channels, regions, groups, groupRects, ERGROUPING_ORIENTATION_HORIZ);
  28.     //erGrouping(input, channels, regions, groups, groupRects, ERGROUPING_ORIENTATION_ANY, "trained_classifier_erGrouping.xml", 0.5);
  29.  
  30.     // draw groups boxes
  31.     for (auto rect : groupRects)
  32.         rectangle(input, rect, Scalar(0, 255, 0), 3);

Get Extremal Region

  1. Mat drawER(const vector<Mat> &channels, const vector<vector<ERStat> > &regions, const vector<Vec2i>& group, const Rect& rect)
  2. {
  3.     Mat out = Mat::zeros(channels[0].rows+2, channels[0].cols+2, CV_8UC1);
  4.  
  5.     int flags = 4					//4 neighbors
  6.                 + (255 << 8)				//paint mask in white (255)
  7. 		+ FLOODFILL_FIXED_RANGE		//fixed range
  8. 		+ FLOODFILL_MASK_ONLY;		//Paint just the mask
  9.  
  10.     for (int g=0; g < group.size(); g++)
  11.     {
  12.         int idx = group[g][0];
  13.         ERStat er = regions[idx][group[g][1]];
  14.  
  15.         //Ignore root region
  16.         if (er.parent == NULL)
  17.             continue;
  18.  
  19.     //Transform the linear pixel value to row and col
  20.     int px = er.pixel % channels[idx].cols;
  21.     int py = er.pixel / channels[idx].cols;
  22.  
  23.     //Create the point and adds it to the list.
  24.     Point p(px, py);
  25.  
  26.     //Draw the extremal region
  27.     floodFill(
  28.                 channels[idx], out,				//Image and mask
  29.                 p, Scalar(255),					//Seed and color
  30.                 nullptr,						//No rect
  31.                 Scalar(er.level),Scalar(0),		//LoDiff and upDiff
  32.                 flags							//Flags
  33.             );
  34.     }
  35.  
  36.     //Crop just the text area and find it's points
  37.     out = out(rect);
  38.  
  39.     vector<Point> points;
  40.     findNonZero(out, points);
  41.     //Use deskew and crop to crop it perfectly
  42.     return deskewAndCrop(out, minAreaRect(points));
  43. }

Create ERFilter

  1.     // Create ERFilter objects with the 1st and 2nd stage classifiers
  2.     auto filter1 = createERFilterNM1(loadClassifierNM1("trained_classifierNM1.xml"),15,0.00015f,0.13f,0.2f,true,0.1f);
  3.     auto filter2 = createERFilterNM2(loadClassifierNM2("trained_classifierNM2.xml"),0.5);
  4.  
  5.     //Extract text regions using the Neumann & Matas algorithm
  6.     cout << "Processing " << channels.size() << " channels..." << endl;
  7.     vector<vector<ERStat> > regions(channels.size());
  8.     for (int c=0; c < channels.size(); c++)
  9.     {
  10.         cout << "    Channel " << (c+1) << endl;
  11.         filter1->run(channels[c], regions[c]);
  12.         filter2->run(channels[c], regions[c]);
  13.     }
  14.     filter1.release();
  15.     filter2.release();

Separate Characters & OCR->Run

  1.     //Separate character groups from regions
  2.     vector< vector<Vec2i> > groups;
  3.     vector<Rect> groupRects;
  4.     erGrouping(input, channels, regions, groups, groupRects, ERGROUPING_ORIENTATION_HORIZ);
  5.     //erGrouping(input, channels, regions, groups, groupRects, ERGROUPING_ORIENTATION_ANY, "trained_classifier_erGrouping.xml", 0.5);
  6.  
  7.     // text detection
  8.     cout << endl << "Detected text:" << endl;
  9.     cout << "-------------" << endl;
  10.     auto ocr = initOCR("tesseract");
  11.     for (int i = 0; i < groups.size(); i++)
  12.     {
  13.         Mat wordImage = drawER(channels, regions, groups[i], groupRects[i]);
  14.  
  15.         string word;
  16.         ocr->run(wordImage, word);
  17.         cout << word << endl;
  18.     }