OpenCV (跟踪运动目标)
Eva.Q Lv9

Tracking ~ ~

学这有啥用嘞

对于手持摄像机拍摄的视频,可以用这种方法消除抖动或减小抖动幅度,使视频更加平稳

运动估值还可用于视频编码,用以压缩视频,便于传输和存储

跟踪特征点

在最初的帧中检测特征点,在下一帧中跟踪这些特征点

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
class FeatureTracker : public FrameProcessor {

cv::Mat gray; // current gray-level image
cv::Mat gray_prev; // previous gray-level image
std::vector<cv::Point2f> points[2]; // tracked features from 0->1
std::vector<cv::Point2f> initial; // initial position of tracked points
std::vector<cv::Point2f> features; // detected features
int max_count; // maximum number of features to detect
double qlevel; // quality level for feature detection
double minDist; // minimum distance between two feature points
std::vector<uchar> status; // status of tracked features
std::vector<float> err; // error in tracking

public:

FeatureTracker() : max_count(500), qlevel(0.01), minDist(10.) {}

// processing method
void process(cv:: Mat &frame, cv:: Mat &output) {

// convert to gray-level image
cv::cvtColor(frame, gray, CV_BGR2GRAY);
frame.copyTo(output);

// 1. if new feature points must be added
if(addNewPoints())
{
// detect feature points
detectFeaturePoints();
// add the detected features to the currently tracked features
points[0].insert(points[0].end(),features.begin(),features.end());
initial.insert(initial.end(),features.begin(),features.end());
}

// for first image of the sequence
if(gray_prev.empty())
gray.copyTo(gray_prev);

// 2. track features
cv::calcOpticalFlowPyrLK(gray_prev, gray, // 2 consecutive images
points[0], // input point position in first image
points[1], // output point postion in the second image
status, // tracking success
err); // tracking error

// 3. loop over the tracked points to reject the undesirables
int k=0;
for( int i= 0; i < points[1].size(); i++ ) {

// do we keep this point?
if (acceptTrackedPoint(i)) {

// keep this point in vector
initial[k]= initial[i];
points[1][k++] = points[1][i];
}
}

// eliminate unsuccesful points
points[1].resize(k);
initial.resize(k);

// 4. handle the accepted tracked points
handleTrackedPoints(frame, output);

// 5. current points and image become previous ones
std::swap(points[1], points[0]);
cv::swap(gray_prev, gray);
}

// feature point detection
void detectFeaturePoints() {

// detect the features
cv::goodFeaturesToTrack(gray, // the image
features, // the output detected features
max_count, // the maximum number of features
qlevel, // quality level
minDist); // min distance between two features
}

// determine if new points should be added
bool addNewPoints() {

// if too few points
return points[0].size()<=10;
}

// determine which tracked point should be accepted
// here we keep only moving points
bool acceptTrackedPoint(int i) {

return status[i] && // status is false if unable to track point i
// if point has moved
(abs(points[0][i].x-points[1][i].x)+
(abs(points[0][i].y-points[1][i].y))>2);
}

// handle the currently tracked points
void handleTrackedPoints(cv:: Mat &frame, cv:: Mat &output) {

// for all tracked points
for(int i= 0; i < points[1].size(); i++ ) {

// draw line and circle
cv::line(output, initial[i], points[1][i], cv::Scalar(255,255,255));
cv::circle(output, points[1][i], 3, cv::Scalar(255,255,255),-1);
}
}
};

#endif

开始跟踪!!

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
int main()
{
    // the video processor drives the processing chain; the feature
    // tracker is invoked on every frame it reads
    VideoProcessor processor;
    FeatureTracker tracker;

    processor.setInput("bike.avi");                       // open the video file
    processor.setFrameProcessor(&tracker);                // attach the tracker callback
    processor.displayOutput("Tracked Features");          // window showing the result
    processor.setDelay(1000./processor.getFrameRate());   // play at the original frame rate
    processor.stopAtFrameNo(90);                          // process the first 90 frames only

    // run the whole sequence, then wait for a key press before exiting
    processor.run();
    cv::waitKey();
}

估算光流

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
// Draw the optical flow vectors on an image.
// oflow     : dense flow field (one cv::Point2f displacement per pixel)
// flowImage : produced visualization; (re)created as a white CV_8UC3
//             image whenever its size does not match the flow field
// stride    : sample the field every `stride` pixels
// scale     : multiplying factor applied to each vector before drawing
// color     : color of the vectors
void drawOpticalFlow(const cv::Mat& oflow,
    cv::Mat& flowImage,
    int stride,
    float scale,
    const cv::Scalar& color)
{

    // create the image if required
    if (flowImage.size() != oflow.size()) {
        flowImage.create(oflow.size(), CV_8UC3);
        flowImage = cv::Scalar(255, 255, 255); // white background
    }

    // for all vectors, using stride as a step
    for (int y = 0; y < oflow.rows; y += stride)
        for (int x = 0; x < oflow.cols; x += stride) {
            // get the flow vector at this pixel
            cv::Point2f vector = oflow.at<cv::Point2f>(y, x);
            // cvRound rounds correctly for negative components too;
            // the original static_cast<int>(v + 0.5) truncates toward
            // zero and is off by one for negative displacements
            cv::Point tip(x + cvRound(scale * vector.x),
                          y + cvRound(scale * vector.y));
            // draw the line
            cv::line(flowImage, cv::Point(x, y), tip, color);
            // draw the arrow tip
            cv::circle(flowImage, tip, 1, color, -1);
        }
}

int main()
{
    // pick 2 frames of the sequence, loaded as gray-level images
    // (cv::IMREAD_GRAYSCALE replaces the magic constant 0)
    cv::Mat frame1 = cv::imread("goose/goose230.bmp", cv::IMREAD_GRAYSCALE);
    cv::Mat frame2 = cv::imread("goose/goose237.bmp", cv::IMREAD_GRAYSCALE);

    // make sure both frames were actually loaded
    if (frame1.empty() || frame2.empty()) {
        std::cout << "could not load the input images" << std::endl;
        return 1;
    }

    // Combined side-by-side display of the two frames
    cv::Mat combined(frame1.rows, frame1.cols + frame2.cols, CV_8U);
    frame1.copyTo(combined.colRange(0, frame1.cols));
    frame2.copyTo(combined.colRange(frame1.cols, frame1.cols + frame2.cols));
    cv::imshow("Frames", combined);

    // Create the Dual TV-L1 optical flow algorithm
    cv::Ptr<cv::DualTVL1OpticalFlow> tvl1 = cv::createOptFlow_DualTVL1();

    std::cout << "regularization coefficient: " << tvl1->getLambda() << std::endl; // the smaller, the smoother
    std::cout << "Number of scales: " << tvl1->getScalesNumber() << std::endl;     // number of pyramid scales
    std::cout << "Scale step: " << tvl1->getScaleStep() << std::endl;              // size ratio between scales
    std::cout << "Number of warpings: " << tvl1->getWarpingsNumber() << std::endl; // warps per scale
    std::cout << "Stopping criteria: " << tvl1->getEpsilon() << " and " << tvl1->getOuterIterations() << std::endl; // convergence threshold and outer iteration cap

    // compute the optical flow between the 2 frames
    cv::Mat oflow; // image of 2D flow vectors
    tvl1->calc(frame1, frame2, oflow);

    // Draw the optical flow image
    cv::Mat flowImage;
    drawOpticalFlow(oflow,     // input flow vectors
        flowImage,             // image to be generated
        8,                     // display vectors every 8 pixels
        2,                     // multiply size of vectors by 2
        cv::Scalar(0, 0, 0));  // vector color

    cv::imshow("Optical Flow", flowImage);

    // compute a smoother optical flow between the 2 frames
    // (a smaller lambda gives less weight to the data term, hence a smoother field)
    tvl1->setLambda(0.075);
    tvl1->calc(frame1, frame2, oflow);

    // Draw the smoother optical flow image
    cv::Mat flowImage2;
    drawOpticalFlow(oflow, flowImage2, 8, 2, cv::Scalar(0, 0, 0));

    cv::imshow("Smoother Optical Flow", flowImage2);
    cv::waitKey();
}

跟踪视频中的物体

visualTracker.h

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
#if !defined FTRACKER
#define FTRACKER

#include <string>
#include <vector>
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2/tracking/tracker.hpp>

#include "videoprocessor.h"

class VisualTracker : public FrameProcessor {

    cv::Ptr<cv::Tracker> tracker; // the OpenCV tracker implementation in use
    cv::Rect2d box;               // current bounding box of the tracked object
    bool reset;                   // true when tracking must be (re)initialized

public:

    // constructor specifying the tracker to be used
    // (initializers listed in member declaration order, which is the order
    // they actually run in — the original's reversed list triggers -Wreorder)
    VisualTracker(cv::Ptr<cv::Tracker> tracker) :
        tracker(tracker), reset(true) {}

    // set the bounding box of the target; the next processed frame
    // will (re)initialize the tracker with it
    void setBoundingBox(const cv::Rect2d& bb) {

        box = bb;
        reset = true;
    }

    // callback processing method
    void process(cv::Mat &frame, cv::Mat &output) {

        if (reset) { // new tracking session: initialize on this frame
            reset = false;
            tracker->init(frame, box);

        } else { // update the target's position in this frame

            tracker->update(frame, box);
        }

        // draw the current bounding box on the output frame
        frame.copyTo(output);
        cv::rectangle(output, box, cv::Scalar(255, 255, 255), 2);
    }
};

#endif
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
int main()
{
// Create video procesor instance
VideoProcessor processor;

// generate the filename
std::vector<std::string> imgs;
std::string prefix = "goose/goose";
std::string ext = ".bmp";

// Add the image names to be used for tracking
for (long i = 130; i < 317; i++) {

std::string name(prefix);
std::ostringstream ss; ss << std::setfill('0') << std::setw(3) << i; name += ss.str();
name += ext;

std::cout << name << std::endl;
imgs.push_back(name);
}

// Create feature tracker instance
cv::Ptr<cv::TrackerMedianFlow> ptr= cv::TrackerMedianFlow::createTracker();
VisualTracker tracker(ptr);
// VisualTracker tracker(cv::TrackerKCF::createTracker());

// Open video file
processor.setInput(imgs);

// set frame processor
processor.setFrameProcessor(&tracker);

// Declare a window to display the video
processor.displayOutput("Tracked object");

// Define the frame rate for display
processor.setDelay(50);

// Specify the original target position
cv::Rect bb(290, 100, 65, 40);
tracker.setBoundingBox(bb);

// Start the tracking
processor.run();

cv::waitKey();

// Illustration of the Median Tracker principle
cv::Mat image1 = cv::imread("goose/goose130.bmp", cv::ImreadModes::IMREAD_GRAYSCALE);

// define a regular grid of points
std::vector<cv::Point2f> grid;
for (int i = 0; i < 10; i++) {
for (int j = 0; j < 10; j++) {
cv::Point2f p(bb.x+i*bb.width/10.,bb.y+j*bb.height/10);
grid.push_back(p);
}
}

// track in next image
cv::Mat image2 = cv::imread("goose/goose131.bmp", cv::ImreadModes::IMREAD_GRAYSCALE);
std::vector<cv::Point2f> newPoints;
std::vector<uchar> status; // status of tracked features
std::vector<float> err; // error in tracking

// track the points
cv::calcOpticalFlowPyrLK(image1, image2, // 2 consecutive images
grid, // input point position in first image
newPoints, // output point postion in the second image
status, // tracking success
err); // tracking error

// Draw the points
for (cv::Point2f p : grid) {

cv::circle(image1, p, 1, cv::Scalar(255, 255, 255), -1);
}
cv::imshow("Initial points", image1);

for (cv::Point2f p : newPoints) {

cv::circle(image2, p, 1, cv::Scalar(255, 255, 255), -1);
}
cv::imshow("Tracked points", image2);

cv::waitKey();
}

  • Post title:OpenCV (跟踪运动目标)
  • Post author:Eva.Q
  • Create time:2021-08-04 14:17:42
  • Post link:https://qyy/2021/08/04/OPENCV/OPENCV1-8/
  • Copyright Notice:All articles in this blog are licensed under BY-NC-SA unless stating additionally.