OpenCV (Computing Depth from Stereo Images)
With a two-camera (stereo) rig, computing the depth map of the scene requires computing the disparity of every pixel.
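For a rectified pair with focal length f (in pixels) and baseline B, depth follows from the disparity d as Z = f·B/d. A minimal sketch of this relation (not from the original post; f and B are placeholder parameters you would take from calibration):

#include <opencv2/core.hpp>
// Sketch: depth from disparity for a rectified stereo pair.
// f is the focal length in pixels, B the baseline; supply calibrated values.
cv::Mat depthFromDisparity(const cv::Mat& disparity32F, float f, float B) {
    cv::Mat depth(disparity32F.size(), CV_32F, cv::Scalar(0));
    for (int r = 0; r < disparity32F.rows; r++)
        for (int c = 0; c < disparity32F.cols; c++) {
            float d = disparity32F.at<float>(r, c);
            if (d > 0.f) depth.at<float>(r, c) = f * B / d; // Z = f*B/d
        }
    return depth;
}

Larger disparity means the point is closer to the cameras; zero or negative disparity is treated as invalid here.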
Obtaining horizontal epipolar lines
Using a robust matching algorithm (RobustMatcher), compute the fundamental matrix of the stereo system to obtain the epipolar lines. For corresponding points x and x', the fundamental matrix F satisfies x'ᵀFx = 0, so Fx is the epipolar line of x in the second image.

robustMatcher.h
#pragma once
#include <iostream>
#include <vector>
#include <opencv2/core.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2/calib3d.hpp>
// check-type constants used by match(); defined here so the header is self-contained
#define NOCHECK 0
#define CROSSCHECK 1
#define RATIOCHECK 2
#define BOTHCHECK 3
class RobustMatcher {
private:
// pointer to the feature point detector object
cv::Ptr<cv::FeatureDetector> detector;
// pointer to the feature descriptor extractor object
cv::Ptr<cv::DescriptorExtractor> descriptor;
int normType;
float ratio; // max ratio between 1st and 2nd NN
bool refineF; // if true will refine the F matrix
bool refineM; // if true will refine the matches (will refine F also)
double distance; // min distance to epipolar
double confidence; // confidence level (probability)
public:
RobustMatcher(const cv::Ptr<cv::FeatureDetector> &detector,
const cv::Ptr<cv::DescriptorExtractor> &descriptor= cv::Ptr<cv::DescriptorExtractor>())
: detector(detector), descriptor(descriptor), normType(cv::NORM_L2),
ratio(0.8f), refineF(true), refineM(true), distance(1.0), confidence(0.98) {
// in this case use the associated descriptor
if (!this->descriptor) {
this->descriptor = this->detector;
}
}
// Set the feature detector
void setFeatureDetector(const cv::Ptr<cv::FeatureDetector>& detect) {
this->detector= detect;
}
// Set descriptor extractor
void setDescriptorExtractor(const cv::Ptr<cv::DescriptorExtractor>& desc) {
this->descriptor= desc;
}
// Set the norm to be used for matching
void setNormType(int norm) {
normType= norm;
}
// Set the minimum distance to epipolar in RANSAC
void setMinDistanceToEpipolar(double d) {
distance= d;
}
// Set confidence level in RANSAC
void setConfidenceLevel(double c) {
confidence= c;
}
// Set the NN ratio
void setRatio(float r) {
ratio= r;
}
// if you want the F matrix to be recalculated
void refineFundamental(bool flag) {
refineF= flag;
}
// if you want the matches to be refined using F
void refineMatches(bool flag) {
refineM= flag;
}
// Clear matches for which NN ratio is > than threshold
// return the number of removed matches
// (accepted matches are copied into outputMatches)
int ratioTest(const std::vector<std::vector<cv::DMatch> >& inputMatches,
std::vector<cv::DMatch>& outputMatches) {
int removed=0;
// for all matches
for (std::vector<std::vector<cv::DMatch> >::const_iterator matchIterator= inputMatches.begin();
matchIterator!= inputMatches.end(); ++matchIterator) {
// first best match/second best match
if ((matchIterator->size() > 1) && // if 2 NNs have been identified
(*matchIterator)[0].distance/(*matchIterator)[1].distance < ratio) {
// it is an acceptable match
outputMatches.push_back((*matchIterator)[0]);
} else {
removed++;
}
}
return removed;
}
// Insert symmetrical matches in symMatches vector
void symmetryTest(const std::vector<cv::DMatch>& matches1,
const std::vector<cv::DMatch>& matches2,
std::vector<cv::DMatch>& symMatches) {
// for all matches image 1 -> image 2
for (std::vector<cv::DMatch>::const_iterator matchIterator1= matches1.begin();
matchIterator1!= matches1.end(); ++matchIterator1) {
// for all matches image 2 -> image 1
for (std::vector<cv::DMatch>::const_iterator matchIterator2= matches2.begin();
matchIterator2!= matches2.end(); ++matchIterator2) {
// Match symmetry test
if (matchIterator1->queryIdx == matchIterator2->trainIdx &&
matchIterator2->queryIdx == matchIterator1->trainIdx) {
// add symmetrical match
symMatches.push_back(*matchIterator1);
break; // next match in image 1 -> image 2
}
}
}
}
// Apply both ratio and symmetry test
// (often overkill)
void ratioAndSymmetryTest(const std::vector<std::vector<cv::DMatch> >& matches1,
const std::vector<std::vector<cv::DMatch> >& matches2,
std::vector<cv::DMatch>& outputMatches) {
// Remove matches for which NN ratio is > than threshold
// clean image 1 -> image 2 matches
std::vector<cv::DMatch> ratioMatches1;
ratioTest(matches1,ratioMatches1);
std::cout << "Number of matched points 1->2 (ratio test): " << ratioMatches1.size() << std::endl;
// clean image 2 -> image 1 matches
std::vector<cv::DMatch> ratioMatches2;
ratioTest(matches2,ratioMatches2);
std::cout << "Number of matched points 2->1 (ratio test): " << ratioMatches2.size() << std::endl;
// Remove non-symmetrical matches
symmetryTest(ratioMatches1,ratioMatches2,outputMatches);
std::cout << "Number of matched points (symmetry test): " << outputMatches.size() << std::endl;
}
// Identify good matches using RANSAC
// Return fundamental matrix and output matches
cv::Mat ransacTest(const std::vector<cv::DMatch>& matches,
std::vector<cv::KeyPoint>& keypoints1,
std::vector<cv::KeyPoint>& keypoints2,
std::vector<cv::DMatch>& outMatches) {
// Convert keypoints into Point2f
std::vector<cv::Point2f> points1, points2;
for (std::vector<cv::DMatch>::const_iterator it= matches.begin();
it!= matches.end(); ++it) {
// Get the position of left keypoints
points1.push_back(keypoints1[it->queryIdx].pt);
// Get the position of right keypoints
points2.push_back(keypoints2[it->trainIdx].pt);
}
// Compute F matrix using RANSAC
std::vector<uchar> inliers(points1.size(),0);
cv::Mat fundamental= cv::findFundamentalMat(
points1,points2, // matching points
inliers, // match status (inlier or outlier)
cv::FM_RANSAC, // RANSAC method
distance, // distance to epipolar line
confidence); // confidence probability
// extract the surviving (inliers) matches
std::vector<uchar>::const_iterator itIn= inliers.begin();
std::vector<cv::DMatch>::const_iterator itM= matches.begin();
// for all matches
for ( ;itIn!= inliers.end(); ++itIn, ++itM) {
if (*itIn) { // it is a valid match
outMatches.push_back(*itM);
}
}
if (refineF || refineM) {
// The F matrix will be recomputed with all accepted matches
// Convert keypoints into Point2f for final F computation
points1.clear();
points2.clear();
for (std::vector<cv::DMatch>::const_iterator it= outMatches.begin();
it!= outMatches.end(); ++it) {
// Get the position of left keypoints
points1.push_back(keypoints1[it->queryIdx].pt);
// Get the position of right keypoints
points2.push_back(keypoints2[it->trainIdx].pt);
}
// Compute 8-point F from all accepted matches
fundamental= cv::findFundamentalMat(
points1,points2, // matching points
cv::FM_8POINT); // 8-point method
if (refineM) {
std::vector<cv::Point2f> newPoints1, newPoints2;
// refine the matches
cv::correctMatches(fundamental, // F matrix
points1, points2, // original position
newPoints1, newPoints2); // new position
for (std::size_t i=0; i< points1.size(); i++) {
std::cout << "(" << keypoints1[outMatches[i].queryIdx].pt.x
<< "," << keypoints1[outMatches[i].queryIdx].pt.y
<< ") -> ";
std::cout << "(" << newPoints1[i].x
<< "," << newPoints1[i].y << ")" << std::endl;
std::cout << "(" << keypoints2[outMatches[i].trainIdx].pt.x
<< "," << keypoints2[outMatches[i].trainIdx].pt.y
<< ") -> ";
std::cout << "(" << newPoints2[i].x
<< "," << newPoints2[i].y << ")" << std::endl;
keypoints1[outMatches[i].queryIdx].pt.x= newPoints1[i].x;
keypoints1[outMatches[i].queryIdx].pt.y= newPoints1[i].y;
keypoints2[outMatches[i].trainIdx].pt.x= newPoints2[i].x;
keypoints2[outMatches[i].trainIdx].pt.y= newPoints2[i].y;
}
}
}
return fundamental;
}
// Match feature points using RANSAC
// returns fundamental matrix and output match set
cv::Mat match(cv::Mat& image1, cv::Mat& image2, // input images
std::vector<cv::DMatch>& matches, // output matches and keypoints
std::vector<cv::KeyPoint>& keypoints1, std::vector<cv::KeyPoint>& keypoints2,
int check=CROSSCHECK) { // check type (symmetry or ratio or none or both)
// 1. Detection of the feature points
detector->detect(image1,keypoints1);
detector->detect(image2,keypoints2);
std::cout << "Number of feature points (1): " << keypoints1.size() << std::endl;
std::cout << "Number of feature points (2): " << keypoints2.size() << std::endl;
// 2. Extraction of the feature descriptors
cv::Mat descriptors1, descriptors2;
descriptor->compute(image1,keypoints1,descriptors1);
descriptor->compute(image2,keypoints2,descriptors2);
std::cout << "descriptor matrix size: " << descriptors1.rows << " by " << descriptors1.cols << std::endl;
// 3. Match the two image descriptors
// (optionally apply some checking method)
// Construction of the matcher with crosscheck
cv::BFMatcher matcher(normType, //distance measure
check==CROSSCHECK); // crosscheck flag
// vectors of matches
std::vector<std::vector<cv::DMatch> > matches1;
std::vector<std::vector<cv::DMatch> > matches2;
std::vector<cv::DMatch> outputMatches;
// call knnMatch if ratio check is required
if (check==RATIOCHECK || check==BOTHCHECK) {
// from image 1 to image 2
// based on k nearest neighbours (with k=2)
matcher.knnMatch(descriptors1,descriptors2,
matches1, // vector of matches (up to 2 per entry)
2); // return 2 nearest neighbours
std::cout << "Number of matched points 1->2: " << matches1.size() << std::endl;
if (check==BOTHCHECK) {
// from image 2 to image 1
// based on k nearest neighbours (with k=2)
matcher.knnMatch(descriptors2,descriptors1,
matches2, // vector of matches (up to 2 per entry)
2); // return 2 nearest neighbours
std::cout << "Number of matched points 2->1: " << matches2.size() << std::endl;
}
}
// select check method
switch (check) {
case CROSSCHECK:
matcher.match(descriptors1,descriptors2,outputMatches);
std::cout << "Number of matched points 1->2 (after cross-check): " << outputMatches.size() << std::endl;
break;
case RATIOCHECK:
ratioTest(matches1,outputMatches);
std::cout << "Number of matched points 1->2 (after ratio test): " << outputMatches.size() << std::endl;
break;
case BOTHCHECK:
ratioAndSymmetryTest(matches1,matches2,outputMatches);
std::cout << "Number of matched points 1->2 (after ratio and cross-check): " << outputMatches.size() << std::endl;
break;
case NOCHECK:
default:
matcher.match(descriptors1,descriptors2,outputMatches);
std::cout << "Number of matched points 1->2: " << outputMatches.size() << std::endl;
break;
}
// 4. Validate matches using RANSAC
cv::Mat fundamental= ransacTest(outputMatches, keypoints1, keypoints2, matches);
std::cout << "Number of matched points (after RANSAC): " << matches.size() << std::endl;
// return the found fundamental matrix
return fundamental;
}
// Match feature points using RANSAC
// returns fundamental matrix and output match set
// this is the simplified version presented in the book
cv::Mat matchBook(cv::Mat& image1, cv::Mat& image2, // input images
std::vector<cv::DMatch>& matches, // output matches and keypoints
std::vector<cv::KeyPoint>& keypoints1, std::vector<cv::KeyPoint>& keypoints2) {
// 1. Detection of the feature points
detector->detect(image1,keypoints1);
detector->detect(image2,keypoints2);
// 2. Extraction of the feature descriptors
cv::Mat descriptors1, descriptors2;
descriptor->compute(image1,keypoints1,descriptors1);
descriptor->compute(image2,keypoints2,descriptors2);
// 3. Match the two image descriptors
// (optionally apply some checking method)
// Construction of the matcher with crosscheck
cv::BFMatcher matcher(normType, //distance measure
true); // crosscheck flag
// match descriptors
std::vector<cv::DMatch> outputMatches;
matcher.match(descriptors1,descriptors2,outputMatches);
// 4. Validate matches using RANSAC
cv::Mat fundamental= ransacTest(outputMatches, keypoints1, keypoints2, matches);
// return the found fundamental matrix
return fundamental;
}
};

robustMatcher.cpp
#include <iostream>
#include <vector>
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/xfeatures2d.hpp>
#include "robustMatcher.h"

int main()
{
// Read input images
cv::Mat image1= cv::imread("brebeuf1.jpg",0);
cv::Mat image2= cv::imread("brebeuf2.jpg",0);
if (!image1.data || !image2.data)
return 0;
// Prepare the matcher (with default parameters)
// here SIFT detector and descriptor
RobustMatcher rmatcher(cv::xfeatures2d::SIFT::create(250));
// Match the two images
std::vector<cv::DMatch> matches;
std::vector<cv::KeyPoint> keypoints1, keypoints2;
cv::Mat fundamental = rmatcher.match(image1, image2, matches,
keypoints1, keypoints2);
// draw the matches
cv::Mat imageMatches;
cv::drawMatches(image1, keypoints1, // 1st image and its keypoints
image2, keypoints2, // 2nd image and its keypoints
matches, // the matches
imageMatches, // the image produced
cv::Scalar(255, 255, 255), // color of the lines
cv::Scalar(255, 255, 255), // color of the keypoints
std::vector<char>(),
2);
cv::namedWindow("Matches");
cv::imshow("Matches", imageMatches);
// Convert keypoints into Point2f
std::vector<cv::Point2f> points1, points2;
for (std::vector<cv::DMatch>::const_iterator it = matches.begin();
it != matches.end(); ++it) {
// Get the position of left keypoints
points1.push_back(keypoints1[it->queryIdx].pt);
// Get the position of right keypoints
points2.push_back(keypoints2[it->trainIdx].pt);
}
// Compute homographic rectification
cv::Mat h1, h2;
cv::stereoRectifyUncalibrated(points1, points2, fundamental, image1.size(), h1, h2);
// Rectify the images through warping
cv::Mat rectified1;
cv::warpPerspective(image1, rectified1, h1, image1.size());
cv::Mat rectified2;
cv::warpPerspective(image2, rectified2, h2, image1.size());
// Display the images
cv::namedWindow("Left Rectified Image");
cv::imshow("Left Rectified Image", rectified1);
cv::namedWindow("Right Rectified Image");
cv::imshow("Right Rectified Image", rectified2);
points1.clear();
points2.clear();
for (int i = 20; i < image1.rows - 20; i += 20) {
points1.push_back(cv::Point(image1.cols / 2, i));
points2.push_back(cv::Point(image2.cols / 2, i));
}
// Draw the epipolar lines
std::vector<cv::Vec3f> lines1;
cv::computeCorrespondEpilines(points1, 1, fundamental, lines1);
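// each epiline is a 3-vector (a,b,c) with a*x + b*y + c = 0, hence y = -(c + a*x)/b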
for (std::vector<cv::Vec3f>::const_iterator it = lines1.begin();
it != lines1.end(); ++it) {
cv::line(image2, cv::Point(0, -(*it)[2] / (*it)[1]),
cv::Point(image2.cols, -((*it)[2] + (*it)[0] * image2.cols) / (*it)[1]),
cv::Scalar(255, 255, 255));
}
std::vector<cv::Vec3f> lines2;
cv::computeCorrespondEpilines(points2, 2, fundamental, lines2);
for (std::vector<cv::Vec3f>::const_iterator it = lines2.begin();
it != lines2.end(); ++it) {
cv::line(image1, cv::Point(0, -(*it)[2] / (*it)[1]),
cv::Point(image1.cols, -((*it)[2] + (*it)[0] * image1.cols) / (*it)[1]),
cv::Scalar(255, 255, 255));
}
// Display the images with epipolar lines
cv::namedWindow("Left Epilines");
cv::imshow("Left Epilines", image1);
cv::namedWindow("Right Epilines");
cv::imshow("Right Epilines", image2);
// draw the pair
cv::drawMatches(image1, keypoints1, // 1st image
image2, keypoints2, // 2nd image
std::vector<cv::DMatch>(),
imageMatches, // the image produced
cv::Scalar(255, 255, 255),
cv::Scalar(255, 255, 255),
std::vector<char>(),
2);
cv::namedWindow("A Stereo pair");
cv::imshow("A Stereo pair", imageMatches);
// Compute disparity
cv::Mat disparity;
cv::Ptr<cv::StereoMatcher> pStereo = cv::StereoSGBM::create(0, // minimum disparity
32, // number of disparities (disparity range)
5); // block size
pStereo->compute(rectified1, rectified2, disparity);
// draw the rectified pair
/*
cv::warpPerspective(image1, rectified1, h1, image1.size());
cv::warpPerspective(image2, rectified2, h2, image1.size());
cv::drawMatches(rectified1, keypoints1, // 1st image
rectified2, keypoints2, // 2nd image
std::vector<cv::DMatch>(),
imageMatches, // the image produced
cv::Scalar(255, 255, 255),
cv::Scalar(255, 255, 255),
std::vector<char>(),
2);
cv::namedWindow("Rectified Stereo pair");
cv::imshow("Rectified Stereo pair", imageMatches);
*/
// boost brightness for display (SGBM disparities are CV_16S, scaled by 16)
disparity = disparity * 64;
double minv, maxv;
cv::minMaxLoc(disparity, &minv, &maxv);
std::cout << "disparity range: " << minv << " to " << maxv << std::endl;
// Display the disparity map
cv::namedWindow("Disparity Map");
cv::imshow("Disparity Map", disparity);
cv::waitKey();
return 0;
}

A homography is used to project each camera's image plane onto a perfectly aligned virtual plane, so that the epipolar lines of the rectified pair become horizontal and row-aligned.
// Compute the homographic rectification transforms
Mat h1, h2;
stereoRectifyUncalibrated(points1, points2, fundamental, image1.size(), h1, h2);
// Rectify the images through warping
Mat rectified1;
warpPerspective(image1, rectified1, h1, image1.size());
Mat rectified2;
warpPerspective(image2, rectified2, h2, image1.size()); // image1.size() for both, so the rectified pair share the same size
// Compute the disparity
Mat disparity;
Ptr<StereoMatcher> pStereo = StereoSGBM::create(0, 32, 5); // min disparity, number of disparities, block size
pStereo->compute(rectified1, rectified2, disparity);

(Figure: a subset of the epipolar lines)
(Figure: the rectified image pair)
(Figure: the disparity map; brighter regions have larger disparity and are therefore closer to the cameras)
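Note that the raw output of StereoSGBM::compute is a CV_16S image whose values are the true disparities multiplied by 16 (4 fractional bits); the disparity * 64 above is only a quick brightness boost. A small sketch of a cleaner 8-bit conversion for display (the window name is arbitrary):

// convert SGBM's fixed-point CV_16S disparity (scaled by 16) to CV_8U;
// 32 is the numDisparities passed to create(0, 32, 5) above
cv::Mat disparity8U;
disparity.convertTo(disparity8U, CV_8U, 255.0 / (32 * 16.0));
cv::imshow("Disparity Map (8-bit)", disparity8U);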