Segmenting images (connected components & the findContours function)

Connected Components

  1. void ConnectedComponents(Mat img)
  2. {
  3.   // Use connected components to separate the possible parts of the image
  4.   Mat labels;
  5.   int num_objects= connectedComponents(img, labels);
  6.   // Check the number of objects detected
  7.   if(num_objects < 2 ){
  8.     cout << "No objects detected" << endl;
  9.     return;
  10.   }else{
  11.     cout << "Number of objects detected: " << num_objects - 1 << endl;
  12.   }
  13.   // Create output image coloring the objects
  14.   Mat output= Mat::zeros(img.rows,img.cols, CV_8UC3);
  15.   RNG rng( 0xFFFFFFFF );
  16.   for(int i=1; i<num_objects; i++){
  17.     Mat mask= labels==i;
  18.     output.setTo(randomColor(rng), mask);
  19.   }
  20.   imshow("Result", output);
  21.   miw->addImage("Result", output);
  22. }
  23.  
  24. void ConnectedComponentsStats(Mat img)
  25. {
  26.   // Use connected components with stats
  27.   Mat labels, stats, centroids;
  28.   int num_objects= connectedComponentsWithStats(img, labels, stats, centroids);
  29.   // Check the number of objects detected
  30.   if(num_objects < 2 ){
  31.     cout << "No objects detected" << endl;
  32.     return;
  33.   }else{
  34.     cout << "Number of objects detected: " << num_objects - 1 << endl;
  35.   }
  36.   // Create output image coloring the objects and show area
  37.   Mat output= Mat::zeros(img.rows,img.cols, CV_8UC3);
  38.   RNG rng( 0xFFFFFFFF );
  39.   for(int i=1; i<num_objects; i++){
  40.     cout << "Object "<< i << " with pos: " << centroids.at<Point2d>(i) << " with area " << stats.at<int>(i, CC_STAT_AREA) << endl;
  41.     Mat mask= labels==i;
  42.     output.setTo(randomColor(rng), mask);
  43.     // draw text with area
  44.     stringstream ss;
  45.     ss << "area: " << stats.at<int>(i, CC_STAT_AREA);
  46.  
  47.     putText(output, 
  48.       ss.str(), 
  49.       centroids.at<Point2d>(i), 
  50.       FONT_HERSHEY_SIMPLEX, 
  51.       0.4, 
  52.       Scalar(255,255,255));
  53.   }
  54.   imshow("Result", output);
  55.   miw->addImage("Result", output);
  56. }
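
A minimal driver sketch for the two functions above (the file name and pre-processing are placeholders; the miw multi-image window helper from the listing is assumed to exist, or those calls can be removed):

    Mat input = imread("objects.png", IMREAD_GRAYSCALE);   // hypothetical input image
    if (input.empty()) return -1;

    // Both functions expect a binary image: denoise and threshold first
    Mat binary;
    medianBlur(input, binary, 3);
    threshold(binary, binary, 0, 255, THRESH_BINARY | THRESH_OTSU);

    ConnectedComponentsStats(binary);
    waitKey(0);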

Find Contours Basic

  1. void FindContoursBasic(Mat img)
  2. {
  3.   vector<vector<Point> > contours;
  4.   findContours(img, contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
  5.   Mat output= Mat::zeros(img.rows,img.cols, CV_8UC3);
  6.   // Check the number of objects detected
  7.   if(contours.size() == 0 ){
  8.     cout << "No objects detected" << endl;
  9.     return;
  10.   }else{
  11.     cout << "Number of objects detected: " << contours.size() << endl;
  12.   }
  13.   RNG rng( 0xFFFFFFFF );
  14.   for(int i=0; i<contours.size(); i++)
  15.     drawContours(output, contours, i, randomColor(rng));
  16.   imshow("Result", output);
  17.   miw->addImage("Result", output);
  18. }
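
findContours also expects a single-channel binary image; a short sketch of preparing one before calling the function above (the file name and Canny thresholds are placeholders):

    Mat gray = imread("objects.png", IMREAD_GRAYSCALE);   // hypothetical input image
    Mat edges;
    Canny(gray, edges, 50, 150);   // any binarization works; Canny is just one option
    FindContoursBasic(edges);
    waitKey(0);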

Scene Text Detection, Extraction & Recognition

Separate Channels

  1. vector<Mat> separateChannels(Mat& src)
  2. {
  3. 	vector<Mat> channels;
  4. 	//Grayscale images
  5. 	if (src.type() == CV_8U || src.type() == CV_8UC1) {
  6. 		channels.push_back(src);
  7. 		channels.push_back(255-src);
  8. 		return channels;
  9. 	}
  10.  
  11. 	//Colored images
  12. 	if (src.type() == CV_8UC3) {
  13. 		computeNMChannels(src, channels);
  14. 		int size = static_cast<int>(channels.size())-1;
  15. 		for (int c = 0; c < size; c++)
  16. 			channels.push_back(255-channels[c]);
  17. 		return channels;
  18. 	}
  19.  
  20. 	//Other types
  21. 	cout << "Invalid image format!" << endl;
  22. 	exit(-1);
  23. }
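
A usage sketch (the file name is a placeholder; computeNMChannels comes from the opencv_text contrib module). For a color input the default mode typically yields 5 channels, and the loop above appends the inverse of every channel except the last one (gradient magnitude), so around 9 channels are returned:

    Mat image = imread("scene.jpg");   // hypothetical input image
    vector<Mat> channels = separateChannels(image);
    cout << "Processing " << channels.size() << " channels" << endl;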

Detection & Draw Group Boxes

  1.     //Convert the input image to grayscale.
  2.     //Just do Mat processed = input; to work with colors.
  3.     Mat processed;
  4.     cvtColor(input, processed, CV_RGB2GRAY);
  5.  
  6.     auto channels = separateChannels(processed);
  7.  
  8.     // Create ERFilter objects with the 1st and 2nd stage classifiers
  9.     auto filter1 = createERFilterNM1(loadClassifierNM1("trained_classifierNM1.xml"),15,0.00015f,0.13f,0.2f,true,0.1f);
  10.     auto filter2 = createERFilterNM2(loadClassifierNM2("trained_classifierNM2.xml"),0.5);
  11.  
  12.     //Extract text regions using the Neumann & Matas algorithm
  13.     cout << "Processing " << channels.size() << " channels..." << endl;
  14.     vector<vector<ERStat> > regions(channels.size());
  15.     for (int c=0; c < channels.size(); c++)
  16.     {
  17.         cout << "    Channel " << (c+1) << endl;
  18.         filter1->run(channels[c], regions[c]);
  19.         filter2->run(channels[c], regions[c]);
  20.     }
  21.     filter1.release();
  22.     filter2.release();
  23.  
  24.     //Separate character groups from regions
  25.     vector< vector<Vec2i> > groups;
  26.     vector<Rect> groupRects;
  27.     erGrouping(input, channels, regions, groups, groupRects, ERGROUPING_ORIENTATION_HORIZ);
  28.     //erGrouping(input, channels, regions, groups, groupRects, ERGROUPING_ORIENTATION_ANY, "trained_classifier_erGrouping.xml", 0.5);
  29.  
  30.     // draw the group boxes
  31.     for (auto rect : groupRects)
  32.         rectangle(input, rect, Scalar(0, 255, 0), 3);
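
The snippet assumes input has already been loaded and that the result is displayed afterwards; a minimal sketch of that surrounding code (the file name is a placeholder):

    Mat input = imread("scene.jpg");   // hypothetical input image
    if (input.empty()) { cout << "Could not load image" << endl; return -1; }
    // ... the detection code above runs here ...
    imshow("Detected text groups", input);
    waitKey(0);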

Get Extremal Region

  1. Mat drawER(const vector<Mat> &channels, const vector<vector<ERStat> > &regions, const vector<Vec2i>& group, const Rect& rect)
  2. {
  3.     Mat out = Mat::zeros(channels[0].rows+2, channels[0].cols+2, CV_8UC1);
  4.  
  5.     int flags = 4                       //4 neighbors
  6.                 + (255 << 8)            //paint the mask in white (255)
  7.                 + FLOODFILL_FIXED_RANGE //fixed range
  8.                 + FLOODFILL_MASK_ONLY;  //paint just the mask
  9.  
  10.     for (int g=0; g < group.size(); g++)
  11.     {
  12.         int idx = group[g][0];
  13.         ERStat er = regions[idx][group[g][1]];
  14.  
  15.         //Ignore root region
  16.         if (er.parent == NULL)
  17.             continue;
  18.  
  19.         //Transform the linear pixel value to row and col
  20.         int px = er.pixel % channels[idx].cols;
  21.         int py = er.pixel / channels[idx].cols;
  22.  
  23.         //Create the point and add it to the list
  24.         Point p(px, py);
  25.  
  26.         //Draw the extremal region
  27.         floodFill(
  28.             channels[idx], out,              //Image and mask
  29.             p, Scalar(255),                  //Seed and color
  30.             nullptr,                         //No rect
  31.             Scalar(er.level), Scalar(0),     //loDiff and upDiff
  32.             flags                            //Flags
  33.         );
  34.     }
  35.  
  36.     //Crop just the text area and find its points
  37.     out = out(rect);
  38.  
  39.     vector<Point> points;
  40.     findNonZero(out, points);
  41.     //Use deskew and crop to crop it perfectly
  42.     return deskewAndCrop(out, minAreaRect(points));
  43. }

Create ERFilter

  1.     // Create ERFilter objects with the 1st and 2nd stage classifiers
  2.     auto filter1 = createERFilterNM1(loadClassifierNM1("trained_classifierNM1.xml"),15,0.00015f,0.13f,0.2f,true,0.1f);
  3.     auto filter2 = createERFilterNM2(loadClassifierNM2("trained_classifierNM2.xml"),0.5);
  4.  
  5.     //Extract text regions using the Neumann & Matas algorithm
  6.     cout << "Processing " << channels.size() << " channels..." << endl;
  7.     vector<vector<ERStat> > regions(channels.size());
  8.     for (int c=0; c < channels.size(); c++)
  9.     {
  10.         cout << "    Channel " << (c+1) << endl;
  11.         filter1->run(channels[c], regions[c]);
  12.         filter2->run(channels[c], regions[c]);
  13.     }
  14.     filter1.release();
  15.     filter2.release();

Separate Characters & OCR->Run

  1.     //Separate character groups from regions
  2.     vector< vector<Vec2i> > groups;
  3.     vector<Rect> groupRects;
  4.     erGrouping(input, channels, regions, groups, groupRects, ERGROUPING_ORIENTATION_HORIZ);
  5.     //erGrouping(input, channels, regions, groups, groupRects, ERGROUPING_ORIENTATION_ANY, "trained_classifier_erGrouping.xml", 0.5);
  6.  
  7.     // Recognize the detected text with OCR
  8.     cout << endl << "Detected text:" << endl;
  9.     cout << "-------------" << endl;
  10.     auto ocr = initOCR("tesseract");
  11.     for (int i = 0; i < groups.size(); i++)
  12.     {
  13.         Mat wordImage = drawER(channels, regions, groups[i], groupRects[i]);
  14.  
  15.         string word;
  16.         ocr->run(wordImage, word);
  17.         cout << word << endl;
  18.     }
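
initOCR() is referenced above but not listed; one possible sketch using the opencv_text module (the signature and body here are assumptions):

    Ptr<OCRTesseract> initOCR(const string& engine)
    {
        // Only the Tesseract backend is handled in this sketch
        return OCRTesseract::create(nullptr, "eng");
    }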

Tesseract OCR API

Tesseract API — OCR

  1. tesseract::TessBaseAPI ocr;
  2.  
  3. char* identifyText(Mat input, const char* language = "eng") 
  4. {	
  5. 	ocr.Init(NULL, language, tesseract::OEM_TESSERACT_ONLY);	
  6. 	ocr.SetVariable("tessedit_write_images", "1");
  7. 	ocr.SetPageSegMode(tesseract::PSM_SINGLE_BLOCK);
  8. 	ocr.SetImage(input.data, input.cols, input.rows, input.elemSize(), input.step);
  9.  
  10. 	char* text = ocr.GetUTF8Text();
  11. 	cout << "Text:" << endl;
  12. 	cout << text << endl;
  13. 	cout << "Confidence: " << ocr.MeanTextConf() << endl << endl;
  14.  
  15. 	// Return the recognized text
  16. 	return text;
  17. }
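
A usage sketch for identifyText (the file name is a placeholder; Tesseract must be able to find its tessdata language files):

    Mat image = imread("text_block.png");   // hypothetical input image
    char* text = identifyText(image, "eng");
    // GetUTF8Text() allocates the buffer, so release it when done
    delete [] text;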

Text Segmentation

Binarize

  1. Mat binarize(Mat input) 
  2. {	
  3. 	//Uses otsu to threshold the input image
  4. 	Mat binaryImage;
  5. 	cvtColor(input, input, CV_BGR2GRAY);
  6. 	threshold(input, binaryImage, 0, 255, THRESH_OTSU);
  7.  
  8. 	//Count the number of black and white pixels
  9. 	int white = countNonZero(binaryImage);
  10. 	int black = binaryImage.size().area() - white;
  11.  
  12. 	//If the image is mostly white (white background), invert it
  13. 	return white < black ? binaryImage : ~binaryImage;
  14. }

Dilate & Find Contours

  1. vector<RotatedRect> findTextAreas(Mat input) {
  2. 	//Dilate the image
  3. 	Mat kernel = getStructuringElement(MORPH_CROSS, Size(3,3));
  4. 	Mat dilated;
  5. 	dilate(input, dilated, kernel, cv::Point(-1, -1), 5);
  6.  
  7. 	//Find all image contours
  8. 	vector<vector<Point> > contours;
  9. 	findContours(dilated, contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
  10.  
  11. 	//For each contour
  12. 	vector<RotatedRect> areas;
  13. 	for (auto contour : contours) 
  14. 	{	
  15. 		//Find its rotated rect
  16. 		auto box = minAreaRect(contour);
  17.  
  18. 		//Discard very small boxes
  19. 		if (box.size.width < 20 || box.size.height < 20)
  20. 			continue;
  21.  
  22. 		//Discard square-shaped boxes and boxes that are
  23. 		//taller than they are wide
  24. 		double proportion = box.angle < -45.0 ?
  25. 			box.size.height / box.size.width : 
  26. 			box.size.width / box.size.height;
  27.  
  28. 		if (proportion < 2) 
  29. 			continue;
  30.  
  31. 		//Add the box
  32. 		areas.push_back(box);
  33. 	}
  34. 	return areas;
  35. }

Crop

  1. Mat deskewAndCrop(Mat input, const RotatedRect& box)
  2. {
  3. 	double angle = box.angle;	
  4. 	Size2f size = box.size;
  5.  
  6. 	//Adjust the box angle
  7. 	if (angle < -45.0)
  8. 	{
  9. 		angle += 90.0;
  10. 		std::swap(size.width, size.height);
  11. 	}
  12.  
  13. 	//Rotate the text according to the angle
  14. 	Mat transform = getRotationMatrix2D(box.center, angle, 1.0);
  15. 	Mat rotated;
  16. 	warpAffine(input, rotated, transform, input.size(), INTER_CUBIC);
  17.  
  18. 	//Crop the result
  19. 	Mat cropped;
  20. 	getRectSubPix(rotated, size, box.center, cropped);
  21. 	copyMakeBorder(cropped,cropped,10,10,10,10,BORDER_CONSTANT,Scalar(0));
  22. 	return cropped;
  23. }
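
A sketch of how the three helpers above can be chained with the Tesseract wrapper shown earlier (the file name is a placeholder):

    Mat image = imread("document.jpg");   // hypothetical input image
    Mat binary = binarize(image);
    auto areas = findTextAreas(binary);
    for (const auto& box : areas)
    {
        Mat cropped = deskewAndCrop(binary, box);
        char* text = identifyText(cropped, "eng");   // identifyText from the Tesseract section
        delete [] text;
    }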

Harris Corner Detector & Shi-Tomasi Corner Detector

Harris Corner

  1.         // Capture the current frame
  2.         cap >> frame;
  3.  
  4.         // Resize the frame
  5.         resize(frame, frame, Size(), scalingFactor, scalingFactor, INTER_AREA);
  6.  
  7.         dst = Mat::zeros(frame.size(), CV_32FC1);
  8.  
  9.         // Convert to grayscale
  10.         cvtColor(frame, frameGray, COLOR_BGR2GRAY );
  11.  
  12.         // Detecting corners
  13.         cornerHarris(frameGray, dst, blockSize, apertureSize, k, BORDER_DEFAULT);
  14.  
  15.         // Normalizing
  16.         normalize(dst, dst_norm, 0, 255, NORM_MINMAX, CV_32FC1, Mat());
  17.         convertScaleAbs(dst_norm, dst_norm_scaled);
  18.  
  19.         // Drawing a circle around corners
  20.         for(int j = 0; j < dst_norm.rows ; j++)
  21.         {
  22.             for(int i = 0; i < dst_norm.cols; i++)
  23.             {
  24.                 if((int)dst_norm.at<float>(j,i) > thresh)
  25.                 {
  26.                     circle(frame, Point(i, j), 8,  Scalar(0,255,0), 2, 8, 0);
  27.                 }
  28.             }
  29.         }
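
The snippet runs inside a capture loop and relies on several variables declared outside it; a sketch of that surrounding code (the parameter values are illustrative placeholders, not tuned values):

    VideoCapture cap(0);   // webcam input
    if (!cap.isOpened()) return -1;

    Mat frame, frameGray, dst, dst_norm, dst_norm_scaled;
    float scalingFactor = 0.75f;
    int blockSize = 2, apertureSize = 3, thresh = 200;
    double k = 0.04;

    while (true)
    {
        // ... the Harris snippet above runs here ...
        imshow("Harris Corner Detector", frame);
        if (waitKey(30) == 27)   // Esc to quit
            break;
    }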

Good Features To Track (Shi-Tomasi)

  1.         // Capture the current frame
  2.         cap >> frame;
  3.  
  4.         // Resize the frame
  5.         resize(frame, frame, Size(), scalingFactor, scalingFactor, INTER_AREA);
  6.  
  7.         // Convert to grayscale
  8.         cvtColor(frame, frameGray, COLOR_BGR2GRAY );
  9.  
  10.         // Initialize the parameters for Shi-Tomasi algorithm
  11.         vector<Point2f> corners;
  12.         double qualityThreshold = 0.02;
  13.         double minDist = 15;
  14.         int blockSize = 5;
  15.         bool useHarrisDetector = false;
  16.         double k = 0.07;
  17.  
  18.         // Clone the input frame
  19.         Mat frameCopy;
  20.         frameCopy = frame.clone();
  21.  
  22.         // Apply corner detection
  23.         goodFeaturesToTrack(frameGray, corners, numCorners, qualityThreshold, minDist, Mat(), blockSize, useHarrisDetector, k);
  24.  
  25.         // Parameters for the circles to display the corners
  26.         int radius = 8;      // radius of the circles
  27.         int thickness = 2;   // thickness of the circles
  28.         int lineType = 8;
  29.  
  30.         // Draw the detected corners using circles
  31.         for(size_t i = 0; i < corners.size(); i++)
  32.         {
  33.             Scalar color = Scalar(rng.uniform(0,255), rng.uniform(0,255), rng.uniform(0,255));
  34.             circle(frameCopy, corners[i], radius, color, thickness, lineType, 0);
  35.         }
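
As with the Harris example, the snippet assumes a few variables from the surrounding program; a sketch of plausible declarations (the values are placeholders):

    VideoCapture cap(0);     // webcam input
    Mat frame, frameGray;
    RNG rng(12345);          // random colors for the corner circles
    int numCorners = 20;     // maximum number of corners to return
    float scalingFactor = 0.75f;
    // inside the capture loop, after the snippet above:
    imshow("Shi-Tomasi corners", frameCopy);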

Tracking Specific Color / Tracking Object

Tracking Specific Color

  1.         // Define the range of "blue" color in HSV colorspace
  2.         Scalar lowerLimit = Scalar(60,100,100);
  3.         Scalar upperLimit = Scalar(180,255,255);
  4.  
  5.         // Threshold the HSV image to get only blue color
  6.         inRange(hsvImage, lowerLimit, upperLimit, mask);
  7.  
  8.         // Compute bitwise-AND of input image and mask
  9.         bitwise_and(frame, frame, outputImage, mask);
  10.  
  11.         // Run a median filter on the output to smooth it
  12.         medianBlur(outputImage, outputImage, 5);
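
The snippet operates on an HSV copy of the frame; a sketch of the per-frame setup it assumes (variable names follow the listing):

    Mat frame, hsvImage, mask, outputImage;
    cap >> frame;                                 // cap: an opened VideoCapture
    cvtColor(frame, hsvImage, COLOR_BGR2HSV);     // inRange above works in HSV space
    // ... color thresholding code above ...
    imshow("Tracked color", outputImage);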

Tracking Object

  1.         if(trackingFlag)
  2.         {
  3.             // Check for all the values in 'hsvimage' that are within the specified range
  4.             // and put the result in 'mask'
  5.             inRange(hsvImage, Scalar(0, minSaturation, minValue), Scalar(180, 256, maxValue), mask);
  6.  
  7.             // Mix the specified channels
  8.             int channels[] = {0, 0};
  9.             hueImage.create(hsvImage.size(), hsvImage.depth());
  10.             mixChannels(&hsvImage, 1, &hueImage, 1, channels, 1);
  11.  
  12.             if(trackingFlag < 0)
  13.             {
  14.                 // Create images based on selected regions of interest
  15.                 Mat roi(hueImage, selectedRect), maskroi(mask, selectedRect);
  16.  
  17.                 // Compute the histogram and normalize it
  18.                 calcHist(&roi, 1, 0, maskroi, hist, 1, &histSize, &histRanges);
  19.                 normalize(hist, hist, 0, 255, CV_MINMAX);
  20.  
  21.                 trackingRect = selectedRect;
  22.                 trackingFlag = 1;
  23.             }
  24.  
  25.             // Compute the histogram back projection
  26.             calcBackProject(&hueImage, 1, 0, hist, backproj, &histRanges);
  27.             backproj &= mask;
  28.             RotatedRect rotatedTrackingRect = CamShift(backproj, trackingRect, TermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1));
  29.  
  30.             // Check if the area of trackingRect is too small
  31.             if(trackingRect.area() <= 1)
  32.             {
  33.                 // Use an offset value to make sure the trackingRect has a minimum size
  34.                 int cols = backproj.cols, rows = backproj.rows;
  35.                 int offset = MIN(rows, cols) + 1;
  36.                 trackingRect = Rect(trackingRect.x - offset, trackingRect.y - offset, trackingRect.x + offset, trackingRect.y + offset) & Rect(0, 0, cols, rows);
  37.             }
  38.  
  39.             // Draw the ellipse on top of the image
  40.             ellipse(image, rotatedTrackingRect, Scalar(0,255,0), 3, CV_AA);
  41.         }
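
The snippet relies on selectedRect and trackingFlag being set elsewhere, typically by a mouse callback that lets the user drag a box over the object to track. This is one possible sketch of such a callback (the names and details are assumptions, not the original listing); it would be registered with setMouseCallback on the display window:

    Rect selectedRect;
    int trackingFlag = 0;
    Point origin;
    bool selectingRegion = false;
    Mat image;               // current frame, shared with the main loop

    static void onMouse(int event, int x, int y, int, void*)
    {
        if (selectingRegion)
        {
            // Update the selection box while the user drags the mouse
            selectedRect.x = MIN(x, origin.x);
            selectedRect.y = MIN(y, origin.y);
            selectedRect.width = abs(x - origin.x);
            selectedRect.height = abs(y - origin.y);
            selectedRect &= Rect(0, 0, image.cols, image.rows);
        }
        if (event == EVENT_LBUTTONDOWN)
        {
            origin = Point(x, y);
            selectedRect = Rect(x, y, 0, 0);
            selectingRegion = true;
        }
        else if (event == EVENT_LBUTTONUP)
        {
            selectingRegion = false;
            if (selectedRect.area() > 0)
                trackingFlag = -1;   // negative value triggers the histogram computation above
        }
    }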

Show histogram & Equalize histogram & Lomography effect & Cartoonize effect

The Original

Show histogram

  1. void showHistoCallback(int state, void* userData)
  2. {
  3.     // Separate the image into its BGR channels
  4.     vector<Mat> bgr;
  5.     split( img, bgr );
  6.  
  7.     // Create the histogram for 256 bins
  8.     // the number of possible values
  9.     int numbins= 256;
  10.  
  11.     // Set the ranges (for B,G,R)
  12.     float range[] = { 0, 256 } ;
  13.     const float* histRange = { range };
  14.  
  15.     Mat b_hist, g_hist, r_hist;
  16.  
  17.     calcHist( &bgr[0], 1, 0, Mat(), b_hist, 1, &numbins, &histRange );
  18.     calcHist( &bgr[1], 1, 0, Mat(), g_hist, 1, &numbins, &histRange );
  19.     calcHist( &bgr[2], 1, 0, Mat(), r_hist, 1, &numbins, &histRange );
  20.  
  21.     // Draw the histogram
  22.     // We draw a line segment for each bin of each channel
  23.     int width= 512;
  24.     int height= 300;
  25.     // Create image with gray base
  26.     Mat histImage( height, width, CV_8UC3, Scalar(20,20,20) );
  27.  
  28.     // Normalize the histograms to height of image
  29.     normalize(b_hist, b_hist, 0, height, NORM_MINMAX );
  30.     normalize(g_hist, g_hist, 0, height, NORM_MINMAX );
  31.     normalize(r_hist, r_hist, 0, height, NORM_MINMAX );
  32.  
  33.     int binStep= cvRound((float)width/(float)numbins);
  34.     for( int i=1; i< numbins; i++)
  35.     {
  36.         line( histImage, 
  37.                 Point( binStep*(i-1), height-cvRound(b_hist.at<float>(i-1) ) ),
  38.                 Point( binStep*(i), height-cvRound(b_hist.at<float>(i) ) ),
  39.                 Scalar(255,0,0)
  40.             );
  41.         line( histImage, 
  42.                 Point( binStep*(i-1), height-cvRound(g_hist.at<float>(i-1) ) ),
  43.                 Point( binStep*(i), height-cvRound(g_hist.at<float>(i) ) ),
  44.                 Scalar(0,255,0)
  45.             );
  46.         line( histImage, 
  47.                 Point( binStep*(i-1), height-cvRound(r_hist.at<float>(i-1) ) ),
  48.                 Point( binStep*(i), height-cvRound(r_hist.at<float>(i) ) ),
  49.                 Scalar(0,0,255)
  50.             );
  51.     }
  52.  
  53.     imshow("Histogram", histImage);
  54.  
  55. }
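
The callback reads a global img and is meant to be attached to a Qt button; a minimal sketch of that wiring (the file name is a placeholder; createButton requires OpenCV built with Qt support):

    Mat img;   // global image used by the callbacks

    int main()
    {
        img = imread("lena.jpg");   // hypothetical input image
        namedWindow("Input");
        createButton("Show histogram", showHistoCallback, NULL, QT_PUSH_BUTTON, 0);
        imshow("Input", img);
        waitKey(0);
        return 0;
    }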

Equalize histogram

  1. void equalizeCallback(int state, void* userData)
  2. {
  3.     Mat result;
  4.     // Convert BGR image to YCbCr
  5.     Mat ycrcb;
  6.     cvtColor( img, ycrcb, COLOR_BGR2YCrCb);
  7.  
  8.     // Split image into channels
  9.     vector<Mat> channels;
  10.     split( ycrcb, channels );
  11.  
  12.     // Equalize the Y channel only
  13.     equalizeHist( channels[0], channels[0] );
  14.  
  15.     // Merge the result channels
  16.     merge( channels, ycrcb );
  17.  
  18.     // Convert color ycrcb to BGR
  19.     cvtColor( ycrcb, result, COLOR_YCrCb2BGR );
  20.  
  21.     // Show image
  22.     imshow("Equalized", result);
  23. }

Lomography effect

  1. void lomoCallback(int state, void* userData)
  2. {
  3.     Mat result;
  4.  
  5.     const double E = std::exp(1.0);
  6.     // Create Lookup table for color curve effect
  7.     Mat lut(1, 256, CV_8UC1);
  8.     for (int i=0; i<256; i++)
  9.     {
  10.         float x= (float)i/256.0; 
  11.         lut.at<uchar>(i)= cvRound( 256 * (1/(1 + pow(E, -((x-0.5)/0.1)) )) );
  12.     }
  13.  
  14.     // Split the image channels and apply curve transform only to red channel
  15.     vector<Mat> bgr;
  16.     split(img, bgr);
  17.     LUT(bgr[2], lut, bgr[2]);
  18.     // merge result
  19.     merge(bgr, result);
  20.  
  21.     // Create an image for the dark halo effect
  22.     Mat halo( img.rows, img.cols, CV_32FC3, Scalar(0.3,0.3,0.3) );
  23.     // Create circle 
  24.     circle(halo, Point(img.cols/2, img.rows/2), img.cols/3, Scalar(1,1,1), -1); 
  25.     blur(halo, halo, Size(img.cols/3, img.cols/3));
  26.  
  27.     // Convert the result to float so it can be multiplied by the halo values (0 to 1)
  28.     Mat resultf;
  29.     result.convertTo(resultf, CV_32FC3);
  30.  
  31.     // Multiply our result with halo
  32.     multiply(resultf, halo, resultf);
  33.  
  34.     // convert to 8 bits
  35.     resultf.convertTo(result, CV_8UC3);
  36.  
  37.     // show result
  38.     imshow("Lomograpy", result);
  39.  
  40.     // Release mat memory
  41.     halo.release();
  42.     resultf.release();
  43.     lut.release();
  44.     bgr[0].release();
  45.     bgr[1].release();
  46.     bgr[2].release();
  47. }

Cartoonize effect

  1. void cartoonCallback(int state, void* userData)
  2. {
  3.     /** EDGES **/
  4.     // Apply median filter to remove possible noise
  5.     Mat imgMedian;
  6.     medianBlur(img, imgMedian, 7);
  7.  
  8.     // Detect edges with canny
  9.     Mat imgCanny;
  10.     Canny(imgMedian, imgCanny, 50, 150);
  11.  
  12.     // Dilate the edges
  13.     Mat kernel= getStructuringElement(MORPH_RECT, Size(2,2));
  14.     dilate(imgCanny, imgCanny, kernel);
  15.  
  16.     // Scale edge values to the range [0,1] and invert them
  17.     imgCanny= imgCanny/255;
  18.     imgCanny= 1-imgCanny;
  19.  
  20.     // Use float values to allow multiplication by values between 0 and 1
  21.     Mat imgCannyf;
  22.     imgCanny.convertTo(imgCannyf, CV_32FC3);
  23.  
  24.     // Blur the edges to produce a smoother effect
  25.     blur(imgCannyf, imgCannyf, Size(5,5));
  26.  
  27.     /** COLOR **/
  28.     // Apply a bilateral filter to homogenize the colors
  29.     Mat imgBF;
  30.     bilateralFilter(img, imgBF, 9, 150.0, 150.0);
  31.  
  32.     // Truncate the colors (quantize to steps of 25)
  33.     Mat result= imgBF/25;
  34.     result= result*25;
  35.  
  36.     /** MERGES COLOR + EDGES **/
  37.     // Create a 3-channel image for the edges
  38.     Mat imgCanny3c;
  39.     Mat cannyChannels[]={ imgCannyf, imgCannyf, imgCannyf};
  40.     merge(cannyChannels, 3, imgCanny3c);
  41.  
  42.     // Convert color result to float 
  43.     Mat resultf;
  44.     result.convertTo(resultf, CV_32FC3);
  45.  
  46.     // Multiply color and edges matrices
  47.     multiply(resultf, imgCanny3c, resultf);
  48.  
  49.     // convert to 8 bits color
  50.     resultf.convertTo(result, CV_8UC3);
  51.  
  52.     // Show image
  53.     imshow("Result", result);
  54.  
  55. }

Slider & Mouse & Button (QT_CHECKBOX, QT_RADIOBOX, QT_PUSH_BUTTON)

Slider

  1. createTrackbar("Lena", "Lena", &blurAmount, 30, onChange, &lena);
  2.  
  3. onChange(blurAmount, &lena);
  4.  
  5. static void onChange(int pos, void* userInput)
  6. {
  7. 	if(pos <= 0)
  8. 		return;
  9. 	// Aux variable for result
  10. 	Mat imgBlur;
  11.  
  12. 	// Get the pointer input image
  13. 	Mat* img= (Mat*)userInput;
  14.  
  15. 	// Apply a blur filter
  16. 	blur(*img, imgBlur, Size(pos, pos));	
  17.  
  18. 	// Show the result
  19. 	imshow("Lena", imgBlur);
  20. }

Mouse

  1. setMouseCallback("Lena", onMouse, &lena);
  2.  
  3. static void onMouse( int event, int x, int y, int, void* userInput )
  4. {
  5. 	if( event != EVENT_LBUTTONDOWN )
  6. 	        return;
  7.  
  8. 	// Get the pointer input image
  9. 	Mat* img= (Mat*)userInput;
  10.  
  11. 	// Draw circle
  12. 	circle(*img, Point(x, y), 10, Scalar(0,255,0), 3);
  13.  
  14. 	// Call on change to get blurred image
  15. 	onChange(blurAmount, img);
  16.  
  17. }

Button (QT_CHECKBOX, QT_RADIOBOX, QT_PUSH_BUTTON)

  1.  
  2. void grayCallback(int state, void* userData)
  3. {
  4. 	applyGray= true;
  5. 	applyFilters();
  6. }
  7. void bgrCallback(int state, void* userData)
  8. {
  9. 	applyGray= false;
  10. 	applyFilters();
  11. }
  12.  
  13. void blurCallback(int state, void* userData)
  14. {
  15. 	applyBlur= (bool)state;
  16. 	applyFilters();
  17. }
  18.  
  19. void sobelCallback(int state, void* userData)
  20. {
  21. 	applySobel= !applySobel;
  22. 	applyFilters();
  23. }
  24.  
  25. 	createButton("Blur", blurCallback, NULL, QT_CHECKBOX, 0);
  26.  
  27. 	createButton("Gray",grayCallback,NULL,QT_RADIOBOX, 0);
  28. 	createButton("RGB",bgrCallback,NULL,QT_RADIOBOX, 1);
  29.  
  30. 	createButton("Sobel",sobelCallback,NULL,QT_PUSH_BUTTON, 0);
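
The callbacks above call applyFilters(), which is not listed here; a minimal sketch of what it could look like, assuming the global flags (applyGray, applyBlur, applySobel) and a global input image img:

    void applyFilters()
    {
        Mat result;
        img.copyTo(result);
        if (applyGray)
            cvtColor(result, result, COLOR_BGR2GRAY);
        if (applyBlur)
            blur(result, result, Size(5, 5));
        if (applySobel)
            Sobel(result, result, CV_8U, 1, 1);
        imshow("Lena", result);
    }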

OpenCV Simple GUI / QT

Simple Window

namedWindow("Photo", WINDOW_AUTOSIZE);
 
moveWindow("Photo", 520, 10);
 
imshow("Photo", photo);
 
resizeWindow("Lena", 512, 512);
 
destroyWindow("Photo");
 
destroyAllWindows();

Qt Functions

displayOverlay("Lena", "Overlay 5secs", 5000);
 
displayStatusBar("Lena", "Status bar 5secs", 5000);
 
saveWindowParameters("Lena");
 
loadWindowParameters("Lena");