Use the SURF Detector to Match the Login View in the DeXing App
First, extract keypoints from a reference image of the login view.
Second, capture the screen with your camera.
Finally, match the capture against the reference with the SURF detector.
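A minimal sketch of these three steps in C++, assuming OpenCV was built with the opencv_contrib xfeatures2d module (SURF lives there), and with login.jpg and capture.jpg as hypothetical stand-ins for the reference view and the camera shot:

#include <opencv2/opencv.hpp>
#include <opencv2/xfeatures2d.hpp>
using namespace cv;

int main()
{
    // Hypothetical file names; substitute your reference view and camera capture
    Mat reference = imread("login.jpg", IMREAD_GRAYSCALE);
    Mat capture = imread("capture.jpg", IMREAD_GRAYSCALE);
    if(reference.empty() || capture.empty())
        return -1;

    // Detect SURF keypoints and compute descriptors for both images
    Ptr<xfeatures2d::SURF> surf = xfeatures2d::SURF::create(400); // Hessian threshold
    std::vector<KeyPoint> kpRef, kpCap;
    Mat descRef, descCap;
    surf->detectAndCompute(reference, noArray(), kpRef, descRef);
    surf->detectAndCompute(capture, noArray(), kpCap, descCap);

    // Match descriptors with a FLANN-based matcher and draw the result
    FlannBasedMatcher matcher;
    std::vector<DMatch> matches;
    matcher.match(descRef, descCap, matches);

    Mat output;
    drawMatches(reference, kpRef, capture, kpCap, matches, output);
    imshow("SURF Matches", output);
    waitKey(0);
    return 0;
}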
Matplotlib RGB & OpenCV BGR
import matplotlib.pyplot as plt
import numpy as np
import cv2

img = np.array([
    [[255, 0, 0], [0, 255, 0], [0, 0, 255]],
    [[255, 255, 0], [255, 0, 255], [0, 255, 255]],
    [[255, 255, 255], [128, 128, 128], [0, 0, 0]],
], dtype=np.uint8)

# Matplotlib treats the last axis as RGB, so this file shows the colors as written above
plt.imsave('img_pyplot.png', img)

# OpenCV treats the last axis as BGR, so red and blue come out swapped in this file
cv2.imwrite('img_cv2.jpg', img)

# To get matching colors out of OpenCV, convert RGB to BGR first
# ('img_cv2_rgb.jpg' is an illustrative file name)
cv2.imwrite('img_cv2_rgb.jpg', cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
Qt + OpenCV Example
Source (main.cpp)
#include <QCoreApplication>
#include <opencv2/opencv.hpp>

int main(int argc, char *argv[])
{
    //QCoreApplication a(argc, argv);
    using namespace cv;

    // Load the image and display it for one second
    Mat image = imread("/Users/water/Downloads/Share.jpg");
    if(image.empty())
        return -1; // the file could not be read

    imshow("Output", image);
    waitKey(1000);
    return 0;
    //return a.exec();
}
Project file (.pro)
QT -= gui
CONFIG += c++11 console
CONFIG -= app_bundle
# The following define makes your compiler emit warnings if you use
# any Qt feature that has been marked deprecated (the exact warnings
# depend on your compiler). Please consult the documentation of the
# deprecated API in order to know how to port your code away from it.
DEFINES += QT_DEPRECATED_WARNINGS
# You can also make your code fail to compile if it uses deprecated APIs.
# In order to do so, uncomment the following line.
# You can also select to disable deprecated APIs only up to a certain version of Qt.
#DEFINES += QT_DISABLE_DEPRECATED_BEFORE=0x060000 # disables all the APIs deprecated before Qt 6.0.0
SOURCES += \
main.cpp
# Default rules for deployment.
qnx: target.path = /tmp/$${TARGET}/bin
else: unix:!android: target.path = /opt/$${TARGET}/bin
!isEmpty(target.path): INSTALLS += target
INCLUDEPATH += /usr/local/include
INCLUDEPATH += /usr/local/include/opencv
INCLUDEPATH += /usr/local/include/opencv2
LIBS += -L/usr/local/lib \
    -lopencv_core \
    -lopencv_highgui \
    -lopencv_imgproc \
    -lopencv_imgcodecs
OpenCV3 On ARM
In CMake-GUI, set the source directory to the OpenCV source tree, choose a separate build directory, then press Configure.
In the generator dialog:
Generator: Unix Makefiles
Select "Specify options for cross-compiling"
Choose ARM - LINUX
Operating System: arm-linux
C Compilers: the cross C compiler from your toolchain
C++ Compilers: the cross C++ compiler from your toolchain
Target Root: the bin directory of your cross compiler
Set CMAKE_INSTALL_PREFIX to where the ARM build should be installed, then press Generate.
Build and install:
make
make install
Finally, package the installed files for the target:
tar jcvf opencv-arm.tar.bz2 opencv-arm/
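The same configuration can also be done from the command line instead of CMake-GUI. A rough sketch, where the arm-linux-gnueabihf compiler names and the opencv-arm install prefix are assumptions to be replaced with your own toolchain and paths:

# Run from the top of the OpenCV source tree.
# The compiler names below are assumptions for an arm-linux-gnueabihf
# toolchain; substitute the ones from your cross compiler's bin directory.
mkdir build && cd build
cmake -D CMAKE_SYSTEM_NAME=Linux \
      -D CMAKE_SYSTEM_PROCESSOR=arm \
      -D CMAKE_C_COMPILER=arm-linux-gnueabihf-gcc \
      -D CMAKE_CXX_COMPILER=arm-linux-gnueabihf-g++ \
      -D CMAKE_INSTALL_PREFIX=../opencv-arm \
      ..
make
make install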
Lucas-Kanade & Farneback
Lucas-Kanade Tracker
// Check if there are points to track
if(!trackingPoints[0].empty())
{
    // Status vector to indicate whether the flow for the corresponding features has been found
    vector<uchar> statusVector;

    // Error vector to indicate the error for the corresponding feature
    vector<float> errorVector;

    // Check if previous image is empty
    if(prevGrayImage.empty())
    {
        curGrayImage.copyTo(prevGrayImage);
    }

    // Calculate the optical flow using the Lucas-Kanade algorithm
    calcOpticalFlowPyrLK(prevGrayImage, curGrayImage, trackingPoints[0], trackingPoints[1], statusVector, errorVector, windowSize, 3, terminationCriteria, 0, 0.001);

    int count = 0;

    // Minimum distance between any two tracking points
    int minDist = 7;

    for(size_t i = 0; i < trackingPoints[1].size(); i++)
    {
        if(pointTrackingFlag)
        {
            // If the new point is within 'minDist' distance from an existing point, it will not be tracked
            if(norm(currentPoint - trackingPoints[1][i]) <= minDist)
            {
                pointTrackingFlag = false;
                continue;
            }
        }

        // Skip points for which the flow was not found
        if(!statusVector[i])
            continue;

        trackingPoints[1][count++] = trackingPoints[1][i];

        // Draw a circle around each of the tracking points
        int radius = 8;
        int thickness = 2;
        int lineType = 8;
        circle(image, trackingPoints[1][i], radius, Scalar(0, 255, 0), thickness, lineType);
    }

    trackingPoints[1].resize(count);
}

// Refine the location of the feature points
if(pointTrackingFlag && trackingPoints[1].size() < maxNumPoints)
{
    vector<Point2f> tempPoints;
    tempPoints.push_back(currentPoint);

    // Refine the corner location to subpixel accuracy; here, 'pixel' refers to
    // the image patch of size 'windowSize' and not the actual image pixel
    cornerSubPix(curGrayImage, tempPoints, windowSize, Size(-1, -1), terminationCriteria);
    trackingPoints[1].push_back(tempPoints[0]);
    pointTrackingFlag = false;
}
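The snippet relies on surrounding state that the excerpt leaves out. A sketch of plausible declarations (the exact values are assumptions, and currentPoint / pointTrackingFlag would typically be set from a mouse callback):

// Assumed context for the snippet above (values are illustrative)
vector<Point2f> trackingPoints[2];      // previous / current point sets
Mat prevGrayImage, curGrayImage, image; // grayscale frames and the display frame
Size windowSize(25, 25);                // search window for LK and cornerSubPix
TermCriteria terminationCriteria(TermCriteria::COUNT | TermCriteria::EPS, 10, 0.02);
const size_t maxNumPoints = 200;        // cap on the number of tracked points
Point2f currentPoint;                   // set by a mouse callback on click
bool pointTrackingFlag = false;         // true when a new click is pending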
Farneback Tracker
// Check if the image is valid
if(prevGray.data)
{
    // Initialize parameters for the optical flow algorithm
    float pyrScale = 0.5;
    int numLevels = 3;
    int windowSize = 15;
    int numIterations = 3;
    int neighborhoodSize = 5;
    float stdDeviation = 1.2;

    // Calculate the dense optical flow map using the Farneback algorithm
    // (OPTFLOW_USE_INITIAL_FLOW assumes flowImage already holds a valid flow estimate)
    calcOpticalFlowFarneback(prevGray, curGray, flowImage, pyrScale, numLevels, windowSize, numIterations, neighborhoodSize, stdDeviation, OPTFLOW_USE_INITIAL_FLOW);

    // Convert to a 3-channel BGR image so the flow vectors can be drawn in color
    cvtColor(prevGray, flowImageGray, COLOR_GRAY2BGR);

    // Draw the optical flow map
    drawOpticalFlow(flowImage, flowImageGray);

    // Display the output image
    imshow(windowName, flowImageGray);
}
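drawOpticalFlow is not a library function. A minimal sketch of one way to implement it, sampling the flow field on a coarse grid and drawing one line per sample:

// Sketch: draw the flow vector at every 'stepSize' pixels as a green line
void drawOpticalFlow(const Mat& flowImage, Mat& flowImageGray)
{
    int stepSize = 16;
    Scalar color(0, 255, 0);

    for(int y = 0; y < flowImageGray.rows; y += stepSize)
    {
        for(int x = 0; x < flowImageGray.cols; x += stepSize)
        {
            // Flow at this point: (dx, dy) displacement in pixels
            const Point2f& flowVector = flowImage.at<Point2f>(y, x);
            Point endPoint(cvRound(x + flowVector.x), cvRound(y + flowVector.y));
            line(flowImageGray, Point(x, y), endPoint, color);
            circle(flowImageGray, Point(x, y), 1, color, -1);
        }
    }
}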
Other Morphological Operators
Opening
Mat performOpening(Mat inputImage, int morphologyElement, int morphologySize)
{
    Mat outputImage, tempImage;
    int morphologyType;

    if(morphologyElement == 1)
        morphologyType = MORPH_CROSS;
    else if(morphologyElement == 2)
        morphologyType = MORPH_ELLIPSE;
    else
        morphologyType = MORPH_RECT; // default to a rectangular element

    // Create the structuring element
    Mat element = getStructuringElement(morphologyType, Size(2*morphologySize + 1, 2*morphologySize + 1), Point(morphologySize, morphologySize));

    // Opening = erosion followed by dilation
    erode(inputImage, tempImage, element);
    dilate(tempImage, outputImage, element);

    return outputImage;
}
Closing
Mat performClosing(Mat inputImage, int morphologyElement, int morphologySize)
{
    Mat outputImage, tempImage;
    int morphologyType;

    if(morphologyElement == 1)
        morphologyType = MORPH_CROSS;
    else if(morphologyElement == 2)
        morphologyType = MORPH_ELLIPSE;
    else
        morphologyType = MORPH_RECT; // default to a rectangular element

    // Create the structuring element
    Mat element = getStructuringElement(morphologyType, Size(2*morphologySize + 1, 2*morphologySize + 1), Point(morphologySize, morphologySize));

    // Closing = dilation followed by erosion
    dilate(inputImage, tempImage, element);
    erode(tempImage, outputImage, element);

    return outputImage;
}
Morphological Gradient
Mat performMorphologicalGradient(Mat inputImage, int morphologyElement, int morphologySize)
{
    Mat tempImage1, tempImage2;
    int morphologyType;

    if(morphologyElement == 1)
        morphologyType = MORPH_CROSS;
    else if(morphologyElement == 2)
        morphologyType = MORPH_ELLIPSE;
    else
        morphologyType = MORPH_RECT; // default to a rectangular element

    // Create the structuring element
    Mat element = getStructuringElement(morphologyType, Size(2*morphologySize + 1, 2*morphologySize + 1), Point(morphologySize, morphologySize));

    // Gradient = dilation minus erosion
    dilate(inputImage, tempImage1, element);
    erode(inputImage, tempImage2, element);

    return tempImage1 - tempImage2;
}
TopHat
Mat performTopHat(Mat inputImage, int morphologyElement, int morphologySize)
{
    // Top hat = original image minus its morphological opening;
    // the structuring element is built inside performOpening
    Mat outputImage = inputImage - performOpening(inputImage, morphologyElement, morphologySize);
    return outputImage;
}
BlackHat
Mat performBlackHat(Mat inputImage, int morphologyElement, int morphologySize)
{
    // Black hat = morphological closing of the image minus the original;
    // the structuring element is built inside performClosing
    Mat outputImage = performClosing(inputImage, morphologyElement, morphologySize) - inputImage;
    return outputImage;
}
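For reference, OpenCV's built-in morphologyEx covers all of the operators above in a single call. A sketch using one structuring element (the 5x5 rectangle is an arbitrary choice):

// Equivalent one-call forms using cv::morphologyEx
Mat element = getStructuringElement(MORPH_RECT, Size(5, 5));
morphologyEx(inputImage, outputImage, MORPH_OPEN, element);     // opening
morphologyEx(inputImage, outputImage, MORPH_CLOSE, element);    // closing
morphologyEx(inputImage, outputImage, MORPH_GRADIENT, element); // morphological gradient
morphologyEx(inputImage, outputImage, MORPH_TOPHAT, element);   // top hat
morphologyEx(inputImage, outputImage, MORPH_BLACKHAT, element); // black hat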
Slimming/Thickening The Shapes
Erosion
Mat performErosion(Mat inputImage, int erosionElement, int erosionSize)
{
    Mat outputImage;
    int erosionType;

    if(erosionElement == 1)
        erosionType = MORPH_CROSS;
    else if(erosionElement == 2)
        erosionType = MORPH_ELLIPSE;
    else
        erosionType = MORPH_RECT; // default to a rectangular element

    // Create the structuring element for erosion
    Mat element = getStructuringElement(erosionType, Size(2*erosionSize + 1, 2*erosionSize + 1), Point(erosionSize, erosionSize));

    // Erode the image using the structuring element
    erode(inputImage, outputImage, element);

    return outputImage;
}
Dilation
Mat performDilation(Mat inputImage, int dilationElement, int dilationSize)
{
    Mat outputImage;
    int dilationType;

    if(dilationElement == 1)
        dilationType = MORPH_CROSS;
    else if(dilationElement == 2)
        dilationType = MORPH_ELLIPSE;
    else
        dilationType = MORPH_RECT; // default to a rectangular element

    // Create the structuring element for dilation
    Mat element = getStructuringElement(dilationType, Size(2*dilationSize + 1, 2*dilationSize + 1), Point(dilationSize, dilationSize));

    // Dilate the image using the structuring element
    dilate(inputImage, outputImage, element);

    return outputImage;
}
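A sketch of how these helpers might be called; the input file name is hypothetical, and a binary or grayscale image shows the effect best:

// Hypothetical input image
Mat input = imread("shapes.png", IMREAD_GRAYSCALE);

Mat slimmed = performErosion(input, 0, 2);    // rectangular element, 5x5: thins shapes
Mat thickened = performDilation(input, 2, 2); // elliptical element, 5x5: thickens shapes

imshow("Eroded", slimmed);
imshow("Dilated", thickened);
waitKey(0);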
Background Subtraction vs. Frame Differencing
Background Subtraction
// Create MOG background subtractor object
pMOG = cv::bgsegm::createBackgroundSubtractorMOG();

// Create MOG2 background subtractor object (history, variance threshold, detect shadows)
pMOG2 = createBackgroundSubtractorMOG2(20, 16, true);

// Capture the current frame
cap >> frame;

// Resize the frame
resize(frame, frame, Size(), scalingFactor, scalingFactor, INTER_AREA);

// Update the MOG background model based on the current frame
pMOG->apply(frame, fgMaskMOG);

// Update the MOG2 background model based on the current frame
pMOG2->apply(frame, fgMaskMOG2);

// Show the current frame
//imshow("Frame", frame);

// Show the MOG foreground mask
//imshow("FG Mask MOG", fgMaskMOG);

// Show the MOG2 foreground mask
imshow("FG Mask MOG 2", fgMaskMOG2);
Frame Differencing
Mat frameDiff(Mat prevFrame, Mat curFrame, Mat nextFrame)
{
    Mat diffFrames1, diffFrames2, output;

    // Compute the absolute difference between the current frame and the next frame
    absdiff(nextFrame, curFrame, diffFrames1);

    // Compute the absolute difference between the current frame and the previous frame
    absdiff(curFrame, prevFrame, diffFrames2);

    // Bitwise "AND" of the two difference images: keeps only motion present in both
    bitwise_and(diffFrames1, diffFrames2, output);

    return output;
}
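A sketch of how frameDiff might be driven, keeping a sliding window of three consecutive grayscale frames and thresholding the result into a binary motion mask (the capture source and the threshold value of 30 are assumptions):

// Maintain a sliding window of three grayscale frames
Mat frame, prevFrame, curFrame, nextFrame, motionMask;
VideoCapture cap(0); // assumed: default camera

// Prime the window with the first three frames
for(int i = 0; i < 3; i++)
{
    cap >> frame;
    prevFrame = curFrame.clone();
    curFrame = nextFrame.clone();
    cvtColor(frame, nextFrame, COLOR_BGR2GRAY);
}

while(true)
{
    // Threshold the differenced image to get a binary motion mask
    threshold(frameDiff(prevFrame, curFrame, nextFrame), motionMask, 30, 255, THRESH_BINARY);
    imshow("Motion", motionMask);
    if(waitKey(30) == 27)
        break; // Esc to quit

    // Slide the window forward by one frame
    prevFrame = curFrame.clone();
    curFrame = nextFrame.clone();
    cap >> frame;
    cvtColor(frame, nextFrame, COLOR_BGR2GRAY);
}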