
Remove Noise

Mat img_noise, img_box_smooth;
// Remove noise with a 3x3 median filter
medianBlur(img, img_noise, 3);
// Alternative: smooth with a 3x3 box (mean) filter
blur(img, img_box_smooth, Size(3,3));
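
These snippets operate on an img that is assumed to have been loaded earlier as a single-channel grayscale image, roughly like this (img_file is a placeholder for your own path):

// Assumed setup: load the input image in grayscale before denoising
Mat img = imread(img_file, 0);
if (img.data == NULL) {
    cout << "Error loading image " << img_file << endl;
    return -1;
}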

Remove Light

// Load image to process
Mat light_pattern= imread(light_pattern_file, 0);
if(light_pattern.data==NULL){
// Calculate light pattern
light_pattern= calculateLightPattern(img_noise);
}
medianBlur(light_pattern, light_pattern, 3);

//Apply the light pattern
Mat img_no_light;
img_noise.copyTo(img_no_light);
if(method_light!=2){
img_no_light= removeLight(img_noise, light_pattern, method_light);
}
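
Neither calculateLightPattern nor removeLight is defined in these snippets. A minimal sketch of what they could look like, assuming the light pattern is estimated with a very large box blur and removed either by division (method 1) or by subtraction (method 0):

// Estimate the background light pattern with a very large blur
Mat calculateLightPattern(Mat img)
{
    Mat pattern;
    blur(img, pattern, Size(img.cols/3, img.cols/3));
    return pattern;
}

// Remove the light pattern: method 0 = difference, method 1 = division
Mat removeLight(Mat img, Mat pattern, int method)
{
    Mat aux;
    if(method == 1){
        // Convert to 32-bit float so the division keeps precision
        Mat img32, pattern32;
        img.convertTo(img32, CV_32F);
        pattern.convertTo(pattern32, CV_32F);
        // Divide the image by the pattern and invert
        aux = 1.0 - (img32 / pattern32);
        aux = aux * 255;
        aux.convertTo(aux, CV_8U);
    }else{
        aux = pattern - img;
    }
    return aux;
}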

Binarize

// Binarize the image for segmentation
Mat img_thr;
if(method_light!=2){
threshold(img_no_light, img_thr, 30, 255, THRESH_BINARY);
}else{
threshold(img_no_light, img_thr, 140, 255, THRESH_BINARY_INV);
}

Connected Components

void ConnectedComponents(Mat img)
{
  // Use connected components to divide the possible parts of the image
  Mat labels;
  int num_objects= connectedComponents(img, labels);
  // Check the number of objects detected
  if(num_objects < 2 ){
    cout << "No objects detected" << endl;
    return;
  }else{
    cout << "Number of objects detected: " << num_objects - 1 << endl;
  }
  // Create output image coloring the objects
  Mat output= Mat::zeros(img.rows,img.cols, CV_8UC3);
  RNG rng( 0xFFFFFFFF );
  for(int i=1; i<num_objects; i++){
    Mat mask= labels==i;
    output.setTo(randomColor(rng), mask);
  }
  imshow("Result", output);
  miw->addImage("Result", output);
}
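
randomColor and miw (a multi-image window helper) come from the surrounding sample and are not shown here. A plausible sketch of randomColor, drawing one random 32-bit value from the RNG and splitting it into three byte channels:

// Build a random BGR color from a single RNG draw
static Scalar randomColor(RNG& rng)
{
    int icolor = (unsigned)rng;
    return Scalar(icolor & 255, (icolor >> 8) & 255, (icolor >> 16) & 255);
}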

void ConnectedComponentsStats(Mat img)
{
  // Use connected components with stats
  Mat labels, stats, centroids;
  int num_objects= connectedComponentsWithStats(img, labels, stats, centroids);
  // Check the number of objects detected
  if(num_objects < 2 ){
    cout << "No objects detected" << endl;
    return;
  }else{
    cout << "Number of objects detected: " << num_objects - 1 << endl;
  }
  // Create output image coloring the objects and show area
  Mat output= Mat::zeros(img.rows,img.cols, CV_8UC3);
  RNG rng( 0xFFFFFFFF );
  for(int i=1; i<num_objects; i++){
    cout << "Object " << i << " with pos: " << centroids.at<Point2d>(i) << " with area " << stats.at<int>(i, CC_STAT_AREA) << endl;
    Mat mask= labels==i;
    output.setTo(randomColor(rng), mask);
    // draw text with area
    stringstream ss;
    ss << "area: " << stats.at<int>(i, CC_STAT_AREA);

    putText(output, 
      ss.str(), 
      centroids.at<Point2d>(i), 
      FONT_HERSHEY_SIMPLEX, 
      0.4, 
      Scalar(255,255,255));
  }
  imshow("Result", output);
  miw->addImage("Result", output);
}

Find Contours Basic

void FindContoursBasic(Mat img)
{
  vector<vector<Point>> contours;
  findContours(img, contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
  Mat output= Mat::zeros(img.rows,img.cols, CV_8UC3);
  // Check the number of objects detected
  if(contours.size() == 0 ){
    cout << "No objects detected" << endl;
    return;
  }else{
    cout << "Number of objects detected: " << contours.size() << endl;
  }
  RNG rng( 0xFFFFFFFF );
  for(int i=0; i<contours.size(); i++)
    drawContours(output, contours, i, randomColor(rng));
  imshow("Result", output);
  miw->addImage("Result", output);
}

Separate Channels

vector<Mat> separateChannels(Mat& src)
{
  vector<Mat> channels;
  //Grayscale images
  if (src.type() == CV_8U || src.type() == CV_8UC1) {
    channels.push_back(src);
    channels.push_back(255-src);
    return channels;
  }

  //Colored images
  if (src.type() == CV_8UC3) {
    computeNMChannels(src, channels);
    int size = static_cast<int>(channels.size())-1;
    for (int c = 0; c < size; c++)
      channels.push_back(255-channels[c]);
    return channels;
  }

  //Other types
  cout << "Invalid image format!" << endl;
  exit(-1);
}

Detection & Draw Groups Boxes

//Convert the input image to grayscale.
//Just do Mat processed = input; to work with colors.
Mat processed;
cvtColor(input, processed, CV_RGB2GRAY);

auto channels = separateChannels(processed);

// Create ERFilter objects with the 1st and 2nd stage classifiers
auto filter1 = createERFilterNM1(loadClassifierNM1("trained_classifierNM1.xml"),15,0.00015f,0.13f,0.2f,true,0.1f);
auto filter2 = createERFilterNM2(loadClassifierNM2("trained_classifierNM2.xml"),0.5);

//Extract text regions using the Neumann & Matas algorithm
cout << "Processing " << channels.size() << " channels..." << endl;
vector<vector<ERStat>> regions(channels.size());
for (int c=0; c < channels.size(); c++)
{
    cout << "    Channel " << (c+1) << endl;
    filter1->run(channels[c], regions[c]);
    filter2->run(channels[c], regions[c]);
}
filter1.release();
filter2.release();

//Separate character groups from regions
vector<vector<Vec2i>> groups;
vector<Rect> groupRects;
erGrouping(input, channels, regions, groups, groupRects, ERGROUPING_ORIENTATION_HORIZ);
//erGrouping(input, channels, regions, groups, groupRects, ERGROUPING_ORIENTATION_ANY, "trained_classifier_erGrouping.xml", 0.5);

// draw groups boxes
for (auto rect : groupRects)
    rectangle(input, rect, Scalar(0, 255, 0), 3);

Get Extremal Region

Mat drawER(const vector<Mat> &channels, const vector<vector<ERStat>> &regions, const vector<Vec2i>& group, const Rect& rect)
{
    Mat out = Mat::zeros(channels[0].rows+2, channels[0].cols+2, CV_8UC1);

    int flags = 4                           //4 neighbors
                + (255 << 8)                //paint mask in white (255)
                + FLOODFILL_FIXED_RANGE     //fixed range
                + FLOODFILL_MASK_ONLY;      //paint just the mask

    for (int g=0; g < group.size(); g++)
    {
        int idx = group[g][0];
        ERStat er = regions[idx][group[g][1]];

        //Ignore root region
        if (er.parent == NULL)
            continue;

        //Transform the linear pixel value to row and col
        int px = er.pixel % channels[idx].cols;
        int py = er.pixel / channels[idx].cols;

        //Create the seed point
        Point p(px, py);

        //Draw the extremal region
        floodFill(
            channels[idx], out,          //Image and mask
            p, Scalar(255),              //Seed and color
            nullptr,                     //No rect
            Scalar(er.level), Scalar(0), //loDiff and upDiff
            flags                        //Flags
        );
    }

    //Crop just the text area and find its points
    out = out(rect);

    vector<Point> points;
    findNonZero(out, points);
    //Use deskew and crop to crop it perfectly
    return deskewAndCrop(out, minAreaRect(points));
}

Create ERFilter

// Create ERFilter objects with the 1st and 2nd stage classifiers
auto filter1 = createERFilterNM1(loadClassifierNM1("trained_classifierNM1.xml"),15,0.00015f,0.13f,0.2f,true,0.1f);
auto filter2 = createERFilterNM2(loadClassifierNM2("trained_classifierNM2.xml"),0.5);

//Extract text regions using the Neumann & Matas algorithm
cout << "Processing " << channels.size() << " channels..." << endl;
vector<vector<ERStat>> regions(channels.size());
for (int c=0; c < channels.size(); c++)
{
    cout << "    Channel " << (c+1) << endl;
    filter1->run(channels[c], regions[c]);
    filter2->run(channels[c], regions[c]);
}
filter1.release();
filter2.release();

Separate Characters & OCR->Run

//Separate character groups from regions
vector<vector<Vec2i>> groups;
vector<Rect> groupRects;
erGrouping(input, channels, regions, groups, groupRects, ERGROUPING_ORIENTATION_HORIZ);
//erGrouping(input, channels, regions, groups, groupRects, ERGROUPING_ORIENTATION_ANY, "trained_classifier_erGrouping.xml", 0.5);

// text detection
cout << endl << "Detected text:" << endl;
cout << "-------------" << endl;
auto ocr = initOCR("tesseract");
for (int i = 0; i < groups.size(); i++)
{
    Mat wordImage = drawER(channels, regions, groups[i], groupRects[i]);

    string word;
    ocr->run(wordImage, word);
    cout << word << endl;
}
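
initOCR is not shown either. Assuming the OpenCV text module's Tesseract wrapper is used, a minimal sketch could look like this (the "eng" language and the default tessdata path are assumptions):

// Create the OCR backend; only the Tesseract engine is sketched here
Ptr<OCRTesseract> initOCR(const string& engine)
{
    if (engine == "tesseract")
        // NULL datapath -> default tessdata location, English model
        return OCRTesseract::create(NULL, "eng");
    throw string("Unsupported OCR engine: ") + engine;
}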

Tesseract API — OCR

tesseract::TessBaseAPI ocr;

char* identifyText(Mat input, char* language = "eng")
{
ocr.Init(NULL, language, tesseract::OEM_TESSERACT_ONLY);
ocr.SetVariable("tessedit_write_images", "1");
ocr.SetPageSegMode(tesseract::PSM_SINGLE_BLOCK);
ocr.SetImage(input.data, input.cols, input.rows, input.elemSize(), input.cols);

char* text = ocr.GetUTF8Text();
cout << "Text:" << endl;
cout << text << endl;
cout << "Confidence: " << ocr.MeanTextConf() << endl << endl;

// Get the text
return text;
}

Binarize

Mat binarize(Mat input)
{
//Uses otsu to threshold the input image
Mat binaryImage;
cvtColor(input, input, CV_BGR2GRAY);
threshold(input, binaryImage, 0, 255, THRESH_OTSU);

//Count the number of black and white pixels
int white = countNonZero(binaryImage);
int black = binaryImage.size().area() - white;

//If the image is mostly white (white background), invert it
return white < black ? binaryImage : ~binaryImage;
}

Dilate & Find Contours

vector<RotatedRect> findTextAreas(Mat input) {
//Dilate the image
Mat kernel = getStructuringElement(MORPH_CROSS, Size(3,3));
Mat dilated;
dilate(input, dilated, kernel, cv::Point(-1, -1), 5);

//Find all image contours
vector<vector<Point>> contours;
findContours(dilated, contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);

//For each contour
vector<RotatedRect> areas;
for (auto contour : contours)
{
//Find its rotated rect
auto box = minAreaRect(contour);

//Discard very small boxes
if (box.size.width < 20 || box.size.height < 20)
continue;

//Discard square-shaped boxes and boxes
//that are taller than they are wide
double proportion = box.angle < -45.0 ?
box.size.height / box.size.width :
box.size.width / box.size.height;

if (proportion < 2)
continue;

//Add the box
areas.push_back(box);
}
return areas;
}

Crop

Mat deskewAndCrop(Mat input, const RotatedRect& box)
{
double angle = box.angle;
Size2f size = box.size;

//Adjust the box angle
if (angle < -45.0)
{
angle += 90.0;
std::swap(size.width, size.height);
}

//Rotate the text according to the angle
Mat transform = getRotationMatrix2D(box.center, angle, 1.0);
Mat rotated;
warpAffine(input, rotated, transform, input.size(), INTER_CUBIC);

//Crop the result
Mat cropped;
getRectSubPix(rotated, size, box.center, cropped);
copyMakeBorder(cropped,cropped,10,10,10,10,BORDER_CONSTANT,Scalar(0));
return cropped;
}
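
A minimal sketch of how these pieces could be chained together with the Tesseract wrapper above; the input file name is a placeholder and error handling is kept to a minimum:

int main()
{
    // Load the photo of the document (placeholder path)
    Mat input = imread("document.jpg");
    if (input.empty())
        return -1;

    // Binarize, locate candidate text areas, then deskew/crop and OCR each one
    Mat binary = binarize(input);
    vector<RotatedRect> areas = findTextAreas(binary);
    for (const auto& box : areas)
    {
        Mat cropped = deskewAndCrop(binary, box);
        char* text = identifyText(cropped);
        delete[] text;   // GetUTF8Text() returns a buffer the caller must free
    }

    ocr.End();   // release the global Tesseract instance
    return 0;
}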

Prepare

Windows 10 Pro 1709
Visual Studio 2017
openssl-1.1.0h.tar.gz
ActivePerl-5.26.1.2601-MSWin32-x64-404865.exe

Install dmake

ppm install dmake

Compile OpenSSL x64 Release DLL

C:\>cd openssl-1.1.0h
C:\openssl-1.1.0h>perl Configure VC-WIN64A no-asm --prefix=C:/openssl --openssldir=C:/openssl/ssl
C:\openssl-1.1.0h>nmake
C:\openssl-1.1.0h>nmake test
C:\openssl-1.1.0h>nmake install
C:\openssl-1.1.0h>nmake clean

Have A Look

C:\openssl>
.
├─bin
│    libcrypto-1_1-x64.dll
│    libssl-1_1-x64.dll
│
├─include
│  └─openssl
│       *.h
│
├─lib
│    libcrypto.lib
│    libssl.lib

VS2017 Project Properties
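
The actual property pages were not captured here. Assuming OpenSSL was installed to C:\openssl as above, a typical x64 Release configuration would be roughly:

C/C++ -> General -> Additional Include Directories:  C:\openssl\include
Linker -> General -> Additional Library Directories: C:\openssl\lib
Linker -> Input -> Additional Dependencies:          libcrypto.lib;libssl.lib;ws2_32.lib
Runtime: copy libcrypto-1_1-x64.dll and libssl-1_1-x64.dll next to the .exe, or add C:\openssl\bin to PATH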

Test Source Code

#include <winsock2.h>
#include <ws2tcpip.h>
#include <iostream>
#include <cstdio>
#include <cstring>
#include <openssl/ssl.h>
#include <openssl/err.h>

const int RECV_SIZE = 8192;

// The UnionPay merchant information (host, port, request data) has been removed here

int main()
{
WSADATA wsadData;
WSAStartup(MAKEWORD(2, 2), &wsadData);

ADDRINFOT aiHints;
ZeroMemory(&aiHints, sizeof(ADDRINFOT));
aiHints.ai_family = AF_INET;
aiHints.ai_flags = AI_PASSIVE;
aiHints.ai_protocol = 0;
aiHints.ai_socktype = SOCK_STREAM;

PADDRINFOT paiResult;
GetAddrInfo(wstrHost.c_str(), NULL, &aiHints, &paiResult);

SOCKET sSocket = socket(AF_INET, SOCK_STREAM, 0);
if (sSocket == SOCKET_ERROR)
{
std::wcout << "Error socket" << std::endl;
return -1;
}

struct sockaddr_in sinHost;
sinHost.sin_addr.s_addr = inet_addr(ip);
sinHost.sin_family = AF_INET;
sinHost.sin_port = htons(port);
if (connect(sSocket, (LPSOCKADDR)&sinHost, sizeof(SOCKADDR_IN)) == SOCKET_ERROR)
{
std::wcout << "Error connect" << std::endl;
return -1;
}

SSL_library_init();
SSLeay_add_ssl_algorithms();
SSL_load_error_strings();

SSL_CTX *pctxSSL = SSL_CTX_new(TLSv1_2_client_method());
if (pctxSSL == NULL)
{
std::wcout << "Error SSL_CTX_new" << std::endl;
return -1;
}

if (SSL_CTX_use_certificate_file(pctxSSL, "./o(* ̄︶ ̄*)o.pem", SSL_FILETYPE_PEM) <= 0)
{
exit(1);
}

SSL *psslSSL = SSL_new(pctxSSL);
if (psslSSL == NULL)
{
std::wcout << "Error SSL_new" << std::endl;
return -1;
}
SSL_set_fd(psslSSL, sSocket);
INT iErrorConnect = SSL_connect(psslSSL);
if (iErrorConnect < 0)
{
std::wcout << "Error SSL_connect, iErrorConnect=" << iErrorConnect << std::endl;
return -1;
}
std::wcout << "SSL connection using " << SSL_get_cipher(psslSSL) << std::endl;

//Delete the HTTP Header

INT iErrorWrite = SSL_write(psslSSL, req, req_len);
if (iErrorWrite < 0)
{
std::wcout << "Error SSL_write" << std::endl;
return -1;
}

LPSTR lpszRead = new CHAR[RECV_SIZE];
INT iLength = 1;
//while (iLength >= 1)
{
iLength = SSL_read(psslSSL, lpszRead, RECV_SIZE - 1);
if (iLength < 0)
{
std::wcout << "Error SSL_read" << std::endl;
delete[] lpszRead;
return -1;
}
lpszRead[iLength] = '\0';
std::wcout << lpszRead;

{
char *token = strtok(lpszRead, "\n");
char *p_Length = NULL;
while (token != NULL) {
printf("%s\n", token);
if (p_Length = strstr(token, "Content-Length: ")) {
recv_len = atoi(&p_Length[strlen("Content-Length: ")]);
memcpy(recv_data, &lpszRead[iLength - recv_len], recv_len);
break;
}
token = strtok(NULL, "\n");
}
}

{
int i;
printf("<--- (%d): \n", recv_len);
for (i = 0; i < recv_len; i++) {
printf("%02X ", recv_data[i]);
}
printf(“\n”);
}

}
delete[] lpszRead;

return 0;
}
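
The test code never releases what it acquires on the success path. A minimal cleanup sketch that would typically sit just before the final return:

// Tear down the TLS session, the socket, and Winsock
SSL_shutdown(psslSSL);
SSL_free(psslSSL);
SSL_CTX_free(pctxSSL);
closesocket(sSocket);
FreeAddrInfo(paiResult);
WSACleanup();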

Test Code

Harris Corner

    // Capture the current frame
    cap >> frame;
    
    // Resize the frame
    resize(frame, frame, Size(), scalingFactor, scalingFactor, INTER_AREA);
    
    dst = Mat::zeros(frame.size(), CV_32FC1);
    
    // Convert to grayscale
    cvtColor(frame, frameGray, COLOR_BGR2GRAY );
    
    // Detecting corners
    cornerHarris(frameGray, dst, blockSize, apertureSize, k, BORDER_DEFAULT);
    
    // Normalizing
    normalize(dst, dst_norm, 0, 255, NORM_MINMAX, CV_32FC1, Mat());
    convertScaleAbs(dst_norm, dst_norm_scaled);
    
    // Drawing a circle around corners
    for(int j = 0; j < dst_norm.rows ; j++)
    {
        for(int i = 0; i < dst_norm.cols; i++)
        {
            if((int)dst_norm.at<float>(j,i) > thresh)
            {
                circle(frame, Point(i, j), 8,  Scalar(0,255,0), 2, 8, 0);
            }
        }
    }
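
The snippet relies on variables declared outside the capture loop (blockSize, apertureSize, k, thresh, and the Mat buffers). Typical values, which are an assumption rather than part of the original post:

// Assumed declarations before the capture loop
Mat frame, frameGray, dst, dst_norm, dst_norm_scaled;
float scalingFactor = 0.75;   // downscale factor for the captured frame
int blockSize = 2;            // neighborhood size for the Harris covariance matrix
int apertureSize = 3;         // Sobel aperture
double k = 0.04;              // Harris free parameter
int thresh = 200;             // response threshold for drawing a corner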

Good Features To Track (Shi-Tomasi)

    // Capture the current frame
    cap >> frame;
    
    // Resize the frame
    resize(frame, frame, Size(), scalingFactor, scalingFactor, INTER_AREA);
    
    // Convert to grayscale
    cvtColor(frame, frameGray, COLOR_BGR2GRAY );
    
    // Initialize the parameters for the Shi-Tomasi algorithm
    vector<Point2f> corners;
    double qualityThreshold = 0.02;
    double minDist = 15;
    int blockSize = 5;
    bool useHarrisDetector = false;
    double k = 0.07;
    
    // Clone the input frame
    Mat frameCopy;
    frameCopy = frame.clone();
    
    // Apply corner detection
    goodFeaturesToTrack(frameGray, corners, numCorners, qualityThreshold, minDist, Mat(), blockSize, useHarrisDetector, k);
    
    // Parameters for the circles to display the corners
    int radius = 8;      // radius of the circles
    int thickness = 2;   // thickness of the circles
    int lineType = 8;
    
    // Draw the detected corners using circles
    for(size_t i = 0; i < corners.size(); i++)
    {
        Scalar color = Scalar(rng.uniform(0,255), rng.uniform(0,255), rng.uniform(0,255));
        circle(frameCopy, corners[i], radius, color, thickness, lineType, 0);
    }
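
numCorners and rng are also defined outside this loop; a plausible setup (the values are assumptions):

// Assumed setup before the capture loop
RNG rng(12345);        // random colors for the drawn circles
int numCorners = 20;   // maximum number of corners to return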

Tracking Specific Color

    // Define the range of "blue" color in HSV colorspace
    Scalar lowerLimit = Scalar(60,100,100);
    Scalar upperLimit = Scalar(180,255,255);
    
    // Threshold the HSV image to get only blue color
    inRange(hsvImage, lowerLimit, upperLimit, mask);
    
    // Compute bitwise-AND of input image and mask
    bitwise_and(frame, frame, outputImage, mask);
    
    // Run median filter on the output to smoothen it
    medianBlur(outputImage, outputImage, 5);
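
The snippet starts from hsvImage, so it assumes the captured BGR frame has already been converted to HSV, roughly like this:

// Convert the captured BGR frame to HSV before thresholding
Mat hsvImage, mask, outputImage;
cvtColor(frame, hsvImage, COLOR_BGR2HSV);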

Tracking Object

    if(trackingFlag)
    {
        // Check for all the values in 'hsvimage' that are within the specified range
        // and put the result in 'mask'
        inRange(hsvImage, Scalar(0, minSaturation, minValue), Scalar(180, 256, maxValue), mask);
        
        // Mix the specified channels
        int channels[] = {0, 0};
        hueImage.create(hsvImage.size(), hsvImage.depth());
        mixChannels(&hsvImage, 1, &hueImage, 1, channels, 1);
        
        if(trackingFlag < 0)
        {
            // Create images based on selected regions of interest
            Mat roi(hueImage, selectedRect), maskroi(mask, selectedRect);
            
            // Compute the histogram and normalize it
            calcHist(&roi, 1, 0, maskroi, hist, 1, &histSize, &histRanges);
            normalize(hist, hist, 0, 255, CV_MINMAX);
            
            trackingRect = selectedRect;
            trackingFlag = 1;
        }
        
        // Compute the histogram back projection
        calcBackProject(&hueImage, 1, 0, hist, backproj, &histRanges);
        backproj &= mask;
        RotatedRect rotatedTrackingRect = CamShift(backproj, trackingRect, TermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1));
        
        // Check if the area of trackingRect is too small
        if(trackingRect.area() <= 1)
        {
            // Use an offset value to make sure the trackingRect has a minimum size
            int cols = backproj.cols, rows = backproj.rows;
            int offset = MIN(rows, cols) + 1;
            trackingRect = Rect(trackingRect.x - offset, trackingRect.y - offset, trackingRect.x + offset, trackingRect.y + offset) & Rect(0, 0, cols, rows);
        }
        
        // Draw the ellipse on top of the image
        ellipse(image, rotatedTrackingRect, Scalar(0,255,0), 3, CV_AA);
    }
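
This snippet also leans on state prepared elsewhere: the HSV conversion, the histogram configuration, and the rectangle selected with the mouse. A sketch of plausible declarations (the concrete values are assumptions):

// Assumed state for the CamShift tracking snippet
Mat hsvImage, hueImage, mask, hist, backproj;
Rect selectedRect, trackingRect;      // selectedRect is filled by a mouse callback
int trackingFlag = 0;                 // set to -1 when a new region is selected
int minSaturation = 40, minValue = 20, maxValue = 245;
int histSize = 8;                     // number of hue bins
float hueRanges[] = {0, 180};
const float* histRanges = hueRanges;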

The Original

Show histogram

void showHistoCallback(int state, void* userData)
{
// Separate image in BRG
vector<Mat> bgr;
split( img, bgr );

// Create the histogram for 256 bins
// The number of possibles values
int numbins= 256;

// Set the ranges for B,G,R
float range[] = { 0, 256 };
const float* histRange = { range };

Mat b_hist, g_hist, r_hist;

calcHist( &bgr[0], 1, 0, Mat(), b_hist, 1, &numbins, &histRange );
calcHist( &bgr[1], 1, 0, Mat(), g_hist, 1, &numbins, &histRange );
calcHist( &bgr[2], 1, 0, Mat(), r_hist, 1, &numbins, &histRange );

// Draw the histogram
// We go to draw lines for each channel
int width= 512;
int height= 300;
// Create image with gray base
Mat histImage( height, width, CV_8UC3, Scalar(20,20,20) );

// Normalize the histograms to height of image
normalize(b_hist, b_hist, 0, height, NORM_MINMAX );
normalize(g_hist, g_hist, 0, height, NORM_MINMAX );
normalize(r_hist, r_hist, 0, height, NORM_MINMAX );

int binStep= cvRound((float)width/(float)numbins);
for( int i=1; i< numbins; i++)
{
    line( histImage, 
            Point( binStep*(i-1), height-cvRound(b_hist.at<float>(i-1) ) ),
            Point( binStep*(i), height-cvRound(b_hist.at<float>(i) ) ),
            Scalar(255,0,0)
        );
    line( histImage, 
            Point( binStep*(i-1), height-cvRound(g_hist.at<float>(i-1) ) ),
            Point( binStep*(i), height-cvRound(g_hist.at<float>(i) ) ),
            Scalar(0,255,0)
        );
    line( histImage, 
            Point( binStep*(i-1), height-cvRound(r_hist.at<float>(i-1) ) ),
            Point( binStep*(i), height-cvRound(r_hist.at<float>(i) ) ),
            Scalar(0,0,255)
        );
}

imshow("Histogram", histImage);

}

Equalize histogram

void equalizeCallback(int state, void* userData)
{
Mat result;
// Convert BGR image to YCbCr
Mat ycrcb;
cvtColor( img, ycrcb, COLOR_BGR2YCrCb);

// Split image into channels
vector<Mat> channels;
split( ycrcb, channels );

// Equalize the Y channel only
equalizeHist( channels[0], channels[0] );

// Merge the result channels
merge( channels, ycrcb );

// Convert color ycrcb to BGR
cvtColor( ycrcb, result, COLOR_YCrCb2BGR );

// Show image
imshow("Equalized", result);

}

Lomography effect

void lomoCallback(int state, void* userData)
{
Mat result;

const double E = std::exp(1.0);
// Create Lookup table for color curve effect
Mat lut(1, 256, CV_8UC1);
for (int i=0; i<256; i++)
{
    float x= (float)i/256.0; 
    lut.at<uchar>(i)= cvRound( 256 * (1/(1 + pow(E, -((x-0.5)/0.1)) )) );
}

// Split the image channels and apply curve transform only to red channel
vector<Mat> bgr;
split(img, bgr);
LUT(bgr[2], lut, bgr[2]);
// merge result
merge(bgr, result);

// Create image for halo dark
Mat halo( img.rows, img.cols, CV_32FC3, Scalar(0.3,0.3,0.3) );
// Create circle 
circle(halo, Point(img.cols/2, img.rows/2), img.cols/3, Scalar(1,1,1), -1); 
blur(halo, halo, Size(img.cols/3, img.cols/3));

// Convert the result to float to allow multiply by 1 factor
Mat resultf;
result.convertTo(resultf, CV_32FC3);

// Multiply our result with halo
multiply(resultf, halo, resultf);

// convert to 8 bits
resultf.convertTo(result, CV_8UC3);

// show result
imshow("Lomograpy", result);

// Release mat memory
halo.release();
resultf.release();
lut.release();
bgr[0].release();
bgr[1].release();
bgr[2].release();

}

Cartoonize effect

void cartoonCallback(int state, void* userData)
{
/** EDGES **/
// Apply median filter to remove possible noise
Mat imgMedian;
medianBlur(img, imgMedian, 7);

// Detect edges with canny
Mat imgCanny;
Canny(imgMedian, imgCanny, 50, 150);

// Dilate the edges
Mat kernel= getStructuringElement(MORPH_RECT, Size(2,2));
dilate(imgCanny, imgCanny, kernel);

// Scale edges values to 1 and invert values
imgCanny= imgCanny/255;
imgCanny= 1-imgCanny;

// Use float values to allow multiply between 0 and 1
Mat imgCannyf;
imgCanny.convertTo(imgCannyf, CV_32FC3);

// Blur the edges to get a smooth effect
blur(imgCannyf, imgCannyf, Size(5,5));

/** COLOR **/
// Apply bilateral filter to homogenize colors
Mat imgBF;
bilateralFilter(img, imgBF, 9, 150.0, 150.0);

// truncate colors
Mat result= imgBF/25;
result= result*25;

/** MERGES COLOR + EDGES **/
// Create a 3-channel image for the edges
Mat imgCanny3c;
Mat cannyChannels[]={ imgCannyf, imgCannyf, imgCannyf};
merge(cannyChannels, 3, imgCanny3c);

// Convert color result to float 
Mat resultf;
result.convertTo(resultf, CV_32FC3);

// Multiply color and edges matrices
multiply(resultf, imgCanny3c, resultf);

// convert to 8 bits color
resultf.convertTo(result, CV_8UC3);

// Show image
imshow("Result", result);

}

Refer to "OPENCV -> OpenCV 3 on macOS High Sierra -> 2. How to Install OpenCV3 WITH_VTK WITH_OPENGL on macOS High Sierra 10.13.3"