Getting a runtime error when running the following code:

OpenCV Error: Bad argument (The input arrays should be 2D or 3D point sets) in cv::findHomography, file C:\opencv\source\opencv-3.3.0\modules\calib3d\src\fundam.cpp, line 341
I am new to C++ and OpenCV 3.3. Below is a program I am trying to develop to stitch two images together. I have similar Python code that I am trying to convert to C++. It fails with the runtime error above; please help.
#include <opencv2/opencv.hpp>
#include <opencv2/xfeatures2d.hpp>

using namespace std;
using namespace cv;

Mat DetectAndDescribe(Mat image) {
    //create the greyscale image
    Mat grayImage;
    cv::cvtColor(image, grayImage, COLOR_BGR2GRAY);
    //create the keypoint detector and descriptor as "feature" using SIFT
    Ptr<Feature2D> feature = xfeatures2d::SIFT::create();
    //create a matrix of keypoints using feature
    vector<KeyPoint> keypoints;
    feature->detect(grayImage, keypoints);
    //create a matrix of descriptors using feature and keypoints
    Mat descriptor;
    feature->compute(grayImage, keypoints, descriptor);
    return (keypoints, descriptor);
}

Mat matchKeypoints(Mat imageA, Mat imageB, vector<KeyPoint> keypointA, vector<KeyPoint> keypointB, Mat featuresA, Mat featuresB, float ratio, double repojThresh) {
    //create a vector of vectors to hold raw matches
    vector<vector<DMatch>> rawMatches;
    //create a vector of DMatches to hold good matches
    vector<DMatch> goodMatches;
    //create two point vectors to hold the points where the lines will be drawn
    vector<Point2f> pointsA;
    vector<Point2f> pointsB;
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce");
    matcher->knnMatch(featuresA, featuresB, rawMatches, 2);
    goodMatches.reserve(rawMatches.size());
    /*
    double tresholdDist = ratio * sqrt(double(imageA.size().height*imageA.size().height + imageA.size().width*imageA.size().width));
    for (size_t i = 0; i < rawMatches.size(); ++i)
    {
        for (int j = 0; j < rawMatches[i].size(); j++)
        {
            Point2f from = keypointA[rawMatches[i][j].queryIdx].pt;
            Point2f to = keypointB[rawMatches[i][j].trainIdx].pt;
            //calculate local distance for each possible match
            double dist = sqrt((from.x - to.x) * (from.x - to.x) + (from.y - to.y) * (from.y - to.y));
            //save as best match if local distance is in specified area and on same height
            if (dist < tresholdDist && abs(from.y - to.y) < 2)
            {
                goodMatches.push_back(rawMatches[i][j]);
                j = rawMatches[i].size();
            }
        }
    }
    */
    for (size_t i = 0; i < rawMatches.size(); i++)
    {
        if ((rawMatches[i].size() == 2) && (rawMatches[i][0].distance < (rawMatches[i][1].distance*ratio)))
        {
            goodMatches.push_back(rawMatches[i][0]);
        }
    }
    cv::KeyPoint::convert(keypointA, pointsA);
    cv::KeyPoint::convert(keypointB, pointsB);
    if (goodMatches.size() > 4) {
        Mat homographyM = findHomography(pointsA, pointsB, RANSAC, repojThresh);
        return (goodMatches, homographyM);
    }
}

Mat drawMatches(Mat imageA, Mat imageB, vector<KeyPoint> keypointsA, vector<KeyPoint> keypointsB, vector<DMatch> matches) {
    //initialize the output visualization image
    float hA = imageA.size().height;
    float wA = imageA.size().width;
    float hB = imageB.size().height;
    float wB = imageB.size().width;
    Mat resultImage = Mat(fmax(hA, hB), wA + wB, 3, "uint8");
    //connect lines between the selected points
    Point2f pointA;
    Point2f pointB;
    for (int i = 0; i < matches.size(); i++) {
        pointA = Point2f(keypointsA[matches[i].queryIdx].pt.x, keypointsA[matches[i].queryIdx].pt.y);
        pointB = Point2f(keypointsA[matches[i].trainIdx].pt.x + wA, keypointsB[matches[i].trainIdx].pt.y);
        cv::line(resultImage, pointA, pointB, (0, 255, 0), 1);
    }
    return resultImage;
}

Mat stitch(Mat imageA, Mat imageB, float ratio, double repojThresh, bool showMatches) {
    vector<KeyPoint> keypointA;
    vector<KeyPoint> keypointB;
    Mat featuresA;
    Mat featuresB;
    Mat matchFeatures;
    Mat matches;
    Mat homographyM;
    Mat result;
    float hA = imageA.size().height;
    float wA = imageA.size().width;
    float hB = imageB.size().height;
    float wB = imageB.size().width;
    (keypointA, featuresA) = DetectAndDescribe(imageA);
    (keypointB, featuresB) = DetectAndDescribe(imageB);
    (matches, homographyM) = matchKeypoints(imageA, imageB, keypointA, keypointB, featuresA, featuresB, ratio, repojThresh);
    cv::warpPerspective(imageA, result, homographyM, Size(wA + wB, hA));
    //Point a cv::Mat header at it (no allocation is done)
    Mat final(Size(imageB.cols * 2 + imageB.cols, imageA.rows * 2), CV_8UC3);
    //size of img1
    Mat roi1(final, Rect(0, 0, imageB.cols, imageB.rows));
    Mat roi2(final, Rect(0, 0, result.cols, result.rows));
    result.copyTo(roi2);
    imageB.copyTo(roi1);
    if (showMatches) {
        Mat visiblelines = drawMatches(imageA, imageB, keypointA, keypointB, matches);
        return (result, visiblelines);
    }
    return result;
}

int main() {
    Mat imageA = cv::imread("left.jpg", 1);
    Mat imageB = cv::imread("middle.jpg", 1);
    Mat result;
    Mat visiblelines;
    (result, visiblelines) = stitch(imageB, imageA, 0.75, 4.0, true);
    cv::imshow("KeyPoint matches", visiblelines);
    cv::imshow("Resulting Image", result);
    cv::imwrite("Result.jpg", result);
    cv::waitKey(0);
    cv::destroyAllWindows();
}
I think that, coming from Python, you may have misunderstood how return works in C/C++. It looks like your function DetectAndDescribe returns only the second value (the descriptor), so
return (keypoints, descriptor);
is equivalent to
return descriptor;
Moreover,
(keypointA, featuresA) = DetectAndDescribe(imageA);
(keypointB, featuresB) = DetectAndDescribe(imageB);
is effectively equivalent to
featuresA = DetectAndDescribe(imageA);
featuresB = DetectAndDescribe(imageB);
Take a look at how the comma operator works in C++ when it is not overloaded (I don't believe OpenCV overloads it, but I'll gladly be corrected by those who know for sure).
A simple example:
int bb = 42;
int cc;
(bb, cc) = (3,5);
std::cout << std::endl << bb << std::endl << cc;
will output
42
5
not
3
5
This is because the built-in comma operator evaluates its left operand, discards the result, and yields its right operand: (3, 5) evaluates to 5, and (bb, cc) evaluates to the lvalue cc, so the assignment only touches cc.
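Here is the snippet as a complete program, in case you want to verify it yourself (plain standard C++, no OpenCV involved):

#include <iostream>

int main() {
    int bb = 42;
    int cc = 0;
    // the built-in comma operator discards its left operand:
    // (3, 5) is just 5, and (bb, cc) is just the lvalue cc
    (bb, cc) = (3, 5);
    std::cout << bb << std::endl << cc << std::endl; // prints 42, then 5
    return 0;
}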
I almost forgot to mention: to return multiple values, you can pass them by reference and assign to them inside the function:
void DetectAndDescribe(Mat image, vector<KeyPoint>& keypoints, Mat& descriptor) {
    //create the greyscale image
    Mat grayImage;
    cv::cvtColor(image, grayImage, COLOR_BGR2GRAY);
    //create the keypoint detector and descriptor as "feature" using SIFT
    Ptr<Feature2D> feature = xfeatures2d::SIFT::create();
    //detect the keypoints using feature
    feature->detect(grayImage, keypoints);
    //compute the matrix of descriptors using feature and keypoints
    feature->compute(grayImage, keypoints, descriptor);
}
and call it like this:
DetectAndDescribe(imageA, keypointA, featuresA);
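If you prefer an actual return value over output parameters, another option (a sketch, not the only way) is to bundle the two results in a std::pair; std::make_pair genuinely packs both values, unlike the comma operator:

#include <tuple>
#include <utility>
#include <vector>
#include <opencv2/opencv.hpp>
#include <opencv2/xfeatures2d.hpp>

using namespace cv;
using std::vector;

std::pair<vector<KeyPoint>, Mat> DetectAndDescribe(const Mat& image) {
    Mat grayImage;
    cv::cvtColor(image, grayImage, COLOR_BGR2GRAY);
    Ptr<Feature2D> feature = xfeatures2d::SIFT::create();
    vector<KeyPoint> keypoints;
    Mat descriptor;
    //detect and compute in one call; noArray() means "no mask"
    feature->detectAndCompute(grayImage, noArray(), keypoints, descriptor);
    return std::make_pair(keypoints, descriptor);
}

and unpack it at the call site with std::tie:

vector<KeyPoint> keypointA;
Mat featuresA;
std::tie(keypointA, featuresA) = DetectAndDescribe(imageA);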
Finally, passing large structures such as matrices by value is bad practice; you should pass them by reference as well:
void DetectAndDescribe(Mat& image, vector<KeyPoint>& keypoints, Mat& descriptor)
or, if you are not going to modify them, by const reference:
void DetectAndDescribe(const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptor)
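Note that matchKeypoints and stitch have the same problem: return (goodMatches, homographyM) only returns the homography, and when there are 4 or fewer good matches the function falls off the end without returning anything at all, which is undefined behaviour. Here is a minimal sketch of matchKeypoints using the same output-parameter pattern (keeping your names, including repojThresh); it also builds the point sets for findHomography from the good matches only, so the two arrays are corresponding pairs of equal length, which is what findHomography expects:

bool matchKeypoints(const vector<KeyPoint>& keypointA, const vector<KeyPoint>& keypointB,
                    const Mat& featuresA, const Mat& featuresB,
                    float ratio, double repojThresh,
                    vector<DMatch>& goodMatches, Mat& homographyM) {
    vector<vector<DMatch>> rawMatches;
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce");
    matcher->knnMatch(featuresA, featuresB, rawMatches, 2);
    //keep a match only if it passes the ratio test
    for (size_t i = 0; i < rawMatches.size(); i++) {
        if (rawMatches[i].size() == 2 &&
            rawMatches[i][0].distance < rawMatches[i][1].distance * ratio) {
            goodMatches.push_back(rawMatches[i][0]);
        }
    }
    if (goodMatches.size() <= 4) {
        return false; //not enough correspondences to estimate a homography
    }
    //collect the matched (and only the matched) point pairs
    vector<Point2f> pointsA;
    vector<Point2f> pointsB;
    for (size_t i = 0; i < goodMatches.size(); i++) {
        pointsA.push_back(keypointA[goodMatches[i].queryIdx].pt);
        pointsB.push_back(keypointB[goodMatches[i].trainIdx].pt);
    }
    homographyM = findHomography(pointsA, pointsB, RANSAC, repojThresh);
    return true;
}

Since your current DetectAndDescribe leaves keypointA and keypointB empty, KeyPoint::convert produces empty point sets, which is very likely exactly why findHomography throws "The input arrays should be 2D or 3D point sets".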