OpenCV (Processing the Colors of an Image)
Eva.Q

COLORFUL ~ ~ ~ ~ ~

Comparing colors with the strategy design pattern

Identifying the pixels of an image that have a given color

Code

We use the strategy design pattern to encapsulate the algorithm inside a class:

#if !defined COLORDETECT
#define COLORDETECT

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>

class ColorDetector {

private:

// maximum acceptable distance (color distance threshold)
int maxDist;

// target color
cv::Vec3b target;

// image containing color converted image
cv::Mat converted;
bool useLab;

// image containing resulting binary map
cv::Mat result;

public:

// empty constructor
// default parameter initialization here
ColorDetector() : maxDist(100), target(0,0,0), useLab(false) {}

// extra constructor for Lab color space example
ColorDetector(bool useLab) : maxDist(100), target(0,0,0), useLab(useLab) {}

// full constructor
ColorDetector(uchar blue, uchar green, uchar red, int mxDist=100, bool useLab=false): maxDist(mxDist), useLab(useLab) {

// target color
setTargetColor(blue, green, red);
}

// Computes the distance from target color.
int getDistanceToTargetColor(const cv::Vec3b& color) const {
return getColorDistance(color, target);
}

// Computes the city-block distance between two colors.
int getColorDistance(const cv::Vec3b& color1, const cv::Vec3b& color2) const {

return abs(color1[0]-color2[0])+
abs(color1[1]-color2[1])+
abs(color1[2]-color2[2]);

// Or:
// return static_cast<int>(cv::norm<int,3>(cv::Vec3i(color1[0]-color2[0],color1[1]-color2[1],color1[2]-color2[2])));

// Or:
// cv::Vec3b dist;
// cv::absdiff(color1,color2,dist);
// return cv::sum(dist)[0];
}

// Processes the image. Returns a 1-channel binary image.
cv::Mat process(const cv::Mat &image);

cv::Mat operator()(const cv::Mat &image) {

cv::Mat input;

if (useLab) { // Lab conversion
cv::cvtColor(image, input, CV_BGR2Lab);
}
else {
input = image;
}

cv::Mat output;
// compute absolute difference with target color
cv::absdiff(input,cv::Scalar(target),output);
// split the channels into 3 images
std::vector<cv::Mat> images;
cv::split(output,images);
// add the 3 channels (saturation might occur here)
output= images[0]+images[1]+images[2];
// apply threshold
cv::threshold(output, // input image
output, // output image
maxDist, // threshold (must be < 256)
255, // max value
cv::THRESH_BINARY_INV); // thresholding type

return output;
}

// Getters and setters

// Sets the color distance threshold.
// Threshold must be positive, otherwise distance threshold
// is set to 0.
void setColorDistanceThreshold(int distance) {

if (distance<0)
distance=0;
maxDist= distance;
}

// Gets the color distance threshold
int getColorDistanceThreshold() const {

return maxDist;
}

// Sets the color to be detected
// given in BGR color space
void setTargetColor(uchar blue, uchar green, uchar red) {

// BGR order
target = cv::Vec3b(blue, green, red);

if (useLab) {
// Temporary 1-pixel image
cv::Mat tmp(1, 1, CV_8UC3);
tmp.at<cv::Vec3b>(0, 0) = cv::Vec3b(blue, green, red);

// Converting the target to Lab color space
cv::cvtColor(tmp, tmp, CV_BGR2Lab);

target = tmp.at<cv::Vec3b>(0, 0);
}
}

// Sets the color to be detected
void setTargetColor(cv::Vec3b color) {

target= color;
}

// Gets the color to be detected
cv::Vec3b getTargetColor() const {

return target;
}
};


#endif

The algorithm can then be deployed by creating an instance of this class:

// 1. Create image processor object
ColorDetector cdetect;

// 2. Read input image
cv::Mat image= cv::imread("boldt.jpg");
if (image.empty())
return 0;
cv::namedWindow("Original Image");
cv::imshow("Original Image", image);

// 3. Set input parameters
cdetect.setTargetColor(230,190,130); // here blue sky

// 4. Process the image and display the result
cv::namedWindow("result");
cv::Mat result = cdetect.process(image);
cv::imshow("result",result);
How it works

The core of this algorithm is very simple: it just loops over every pixel and compares its color with the target color:

// get the iterators
cv::Mat_<cv::Vec3b>::const_iterator it= image.begin<cv::Vec3b>();
cv::Mat_<cv::Vec3b>::const_iterator itend= image.end<cv::Vec3b>();
cv::Mat_<uchar>::iterator itout= result.begin<uchar>();

// get the iterators of the converted image
if (useLab) {
it = converted.begin<cv::Vec3b>();
itend = converted.end<cv::Vec3b>();
}

// for each pixel
for ( ; it!= itend; ++it, ++itout) {

// process each pixel ---------------------
// compute distance from target color
if (getDistanceToTargetColor(*it)<maxDist) {
*itout= 255;
} else {
*itout= 0;
}

// end of pixel processing ----------------
}

Computing the distance from the target color:

// Computes the distance from target color.
int getDistanceToTargetColor(const cv::Vec3b& color) const {
return getColorDistance(color, target);
}

// Computes the city-block distance between two colors.
int getColorDistance(const cv::Vec3b& color1, const cv::Vec3b& color2) const {

return abs(color1[0]-color2[0]) + abs(color1[1]-color2[1]) + abs(color1[2]-color2[2]);
}
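
As a worked example (the pixel value here is made up): for a pixel (200, 200, 100), the city-block distance to the sky-blue target (230, 190, 130) is |200-230| + |200-190| + |100-130| = 30 + 10 + 30 = 70, which is below the default threshold of 100, so the pixel is set to 255 in the binary map. The same L1 distance can also be obtained with cv::norm; a minimal sketch:

// City-block (L1) distance computed with cv::norm; the colors are illustrative values
cv::Vec3b pixel(200, 200, 100), targetColor(230, 190, 130);
int dist = static_cast<int>(cv::norm(
cv::Vec3i(pixel[0] - targetColor[0],
pixel[1] - targetColor[1],
pixel[2] - targetColor[2]),
cv::NORM_L1)); // |dB| + |dG| + |dR| = 70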

Segmenting an image with the GrabCut algorithm

The grabCut function is very simple to use: you just provide an image and label some of its pixels as "belonging to the background" or "belonging to the foreground". From this partial labeling, the algorithm computes a foreground/background segmentation of the whole image.

One way to specify a partial foreground/background labeling of the input image is to define a rectangle that contains the foreground object:

cv::Rect rectangle(5, 70, 260, 120); // pixels outside this rectangle will be labeled as background

Calling the function:

// define bounding rectangle 
cv::Rect rectangle(50,25,210,180);
// the models (internally used)
cv::Mat bgModel,fgModel;
// segmentation result
cv::Mat result; // segmentation (4 possible values)

// GrabCut segmentation
cv::grabCut(image, // input image
result, // segmentation result
rectangle,// rectangle containing foreground
bgModel, fgModel, // models
5, // number of iterations
cv::GC_INIT_WITH_RECT); // use rectangle

The cv::GC_INIT_WITH_RECT flag indicates that the bounding rectangle is used to initialize the models.

// Get the pixels marked as likely foreground
cv::compare(result,cv::GC_PR_FGD,result,cv::CMP_EQ);

cv::Mat foreground(image.size(), CV_8UC3, cv::Scalar(255, 255, 255));

image.copyTo(foreground,result); // bg pixels not copied

cv::GC_BGD: pixels that definitely belong to the background (in this example, the pixels outside the rectangle)

cv::GC_FGD: pixels that definitely belong to the foreground (none in this example)

cv::GC_PR_BGD: pixels that probably belong to the background

cv::GC_PR_FGD: pixels that probably belong to the foreground (the initial value of the pixels inside the rectangle)

To extract all foreground pixels, i.e. those whose value is cv::GC_FGD or cv::GC_PR_FGD, you can check the value of the first bit:

result = result&1;

This works because those two constants are defined as the values 1 and 3, while the other two (cv::GC_BGD and cv::GC_PR_BGD) are defined as 0 and 2.
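
A minimal sketch of this bit trick, assuming result still holds the raw labels returned by cv::grabCut (i.e. before the cv::compare call above); the names fgMask and allForeground are illustrative:

// GC_FGD (1) and GC_PR_FGD (3) both have their lowest bit set,
// so (result & 1) yields a mask of every definite-or-probable foreground pixel
cv::Mat fgMask = result & 1;

cv::Mat allForeground(image.size(), CV_8UC3, cv::Scalar(255, 255, 255));
image.copyTo(allForeground, fgMask); // copyTo treats any non-zero mask value as "on"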

Converting color representations

The RGB color space: combining red, green, and blue produces a wide gamut of colors and corresponds to the human visual system. When the three intensities are equal, a shade of gray is obtained. However, RGB is not a perceptually uniform color space, so computing the distance between two colors in it is not the best way to measure their similarity.

To address this, color representations with the property of perceptual uniformity were introduced.

In the CIE L*a*b* color space, the Euclidean distance is used to measure the similarity of two colors.
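
For illustration, a minimal sketch (the two BGR colors are arbitrary) that converts two colors to L*a*b* and measures their Euclidean distance with cv::norm:

// Convert two illustrative BGR colors to Lab and compare them with the L2 (Euclidean) distance
cv::Mat colors(1, 2, CV_8UC3);
colors.at<cv::Vec3b>(0, 0) = cv::Vec3b(230, 190, 130);
colors.at<cv::Vec3b>(0, 1) = cv::Vec3b(130, 190, 230);
cv::cvtColor(colors, colors, CV_BGR2Lab);

cv::Vec3b lab1 = colors.at<cv::Vec3b>(0, 0);
cv::Vec3b lab2 = colors.at<cv::Vec3b>(0, 1);
double labDist = cv::norm(cv::Vec3i(lab1[0] - lab2[0],
lab1[1] - lab2[1],
lab1[2] - lab2[2])); // Euclidean distance in Lab space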

In the process method, the input image is first converted to the CIE L*a*b* color space:

cv::Mat ColorDetector::process(const cv::Mat &image) {

// re-allocate binary map if necessary
// same size as input image, but 1-channel
result.create(image.size(),CV_8U);

// Converting to Lab color space
if (useLab)
cv::cvtColor(image, converted, CV_BGR2Lab);

// get the iterators
cv::Mat_<cv::Vec3b>::const_iterator it= image.begin<cv::Vec3b>();
cv::Mat_<cv::Vec3b>::const_iterator itend= image.end<cv::Vec3b>();
cv::Mat_<uchar>::iterator itout= result.begin<uchar>();

// get the iterators of the converted image
if (useLab) {
it = converted.begin<cv::Vec3b>();
itend = converted.end<cv::Vec3b>();
}

// for each pixel
for ( ; it!= itend; ++it, ++itout) {
…………

The converted variable holds the color-converted image; it is defined as an attribute of the ColorDetector class:

class ColorDetector {

private:

// image containing color converted image
cv::Mat converted;

The target color supplied as input also needs to be converted. The function keeps the same signature as before, i.e. the user still provides the color in RGB (BGR order):

void setTargetColor(uchar blue, uchar green, uchar red) {
// temporary 1-pixel image holding the BGR target color
cv::Mat tmp(1, 1, CV_8UC3);
tmp.at<cv::Vec3b>(0, 0) = cv::Vec3b(blue, green, red);

// converting the target color to the Lab color space
cv::cvtColor(tmp, tmp, CV_BGR2Lab);

target = tmp.at<cv::Vec3b>(0, 0);
}

Representing colors with hue, saturation, and value (HSV)

Color space conversion
// convert into HSV space
cv::Mat hsv;
cv::cvtColor(image, hsv, CV_BGR2HSV);

// split the 3 channels into 3 images
std::vector<cv::Mat> channels;
cv::split(hsv, channels);
// channels[0] is the Hue
// channels[1] is the Saturation
// channels[2] is the Value
Using colors for detection: skin tone detection

The function below builds the detection mask:

void detectHScolor(const cv::Mat& image,		// input image 
double minHue, double maxHue, // Hue interval
double minSat, double maxSat, // saturation interval
cv::Mat& mask) { // output mask

// convert into HSV space
cv::Mat hsv;
cv::cvtColor(image, hsv, CV_BGR2HSV);

// split the 3 channels into 3 images
std::vector<cv::Mat> channels;
cv::split(hsv, channels);
// channels[0] is the Hue
// channels[1] is the Saturation
// channels[2] is the Value

// Hue masking
cv::Mat mask1; // below maxHue
cv::threshold(channels[0], mask1, maxHue, 255, cv::THRESH_BINARY_INV);
cv::Mat mask2; // over minHue
cv::threshold(channels[0], mask2, minHue, 255, cv::THRESH_BINARY);

cv::Mat hueMask; // hue mask
if (minHue < maxHue)
hueMask = mask1 & mask2;
else // if interval crosses the zero-degree axis
hueMask = mask1 | mask2;

// Saturation masking
// below maxSat
cv::threshold(channels[1], mask1, maxSat, 255, cv::THRESH_BINARY_INV);
// over minSat
cv::threshold(channels[1], mask2, minSat, 255, cv::THRESH_BINARY);

cv::Mat satMask; // saturation mask
satMask = mask1 & mask2;

// combined mask
mask = hueMask&satMask;
}

Calling the function:

// detect skin tone
cv::Mat mask;
detectHScolor(image,
160, 10, // hue from 320 degrees to 20 degrees
25, 166, // saturation from ~0.1 to 0.65
mask);

// show masked image
cv::Mat detected(image.size(), CV_8UC3, cv::Scalar(0, 0, 0));
image.copyTo(detected, mask);
cv::imshow("Detection result",detected);
  • Post title: OpenCV (Processing the Colors of an Image)
  • Post author:Eva.Q
  • Create time:2021-08-11 10:07:28
  • Post link:https://qyy/2021/08/11/OPENCV/OPENCV1-12/
  • Copyright Notice:All articles in this blog are licensed under BY-NC-SA unless stating additionally.