(1)直方图伸展以及直方图均衡化
观察某些图像的直方图可以很容易看到,整个可用的强度值范围并没有被完全利用,特别是图像中比较亮的强度值。这里,通过伸展直方图来生成一个对比度更高的图像。
然后,利用EmguCV的自带函数进行直方图均衡化,值得注意的是,函数库里面的直方图均衡命令是针对灰度值而言的,那么,在处理彩色图像的时候,要么将其转换为灰度图像,要么针对BGR三个通道的直方图分别进行均衡,然后将三幅单通道图像合并得到最后的彩色均衡化图像,也是本节的方案。
//基于查找表的直方图伸展,图像增强
// Histogram stretching via a lookup table (LUT) for image contrast enhancement.
// Two variants are shown: one LUT per channel (display uses the blue-channel LUT),
// and a single LUT built from the min/max range common to all three channels.
private void button39_Click(object sender, EventArgs e)
{
    if (chapter4Img.IsEmpty)
        return;
    var dhData = getDenseHistogramDataOfImage(chapter4Img);// default histogram: 256 bins, pixel range [0,255]
    RangeF[] minMaxRange = new RangeF[3];// per-channel [min bin, max bin] after ignoring underpopulated bins
    float minRatio = 0.01f;// bins holding fewer pixels than this fraction of the image are ignored; valid range 0-1
    if (minRatio < 0)
        minRatio = 0;
    else if (minRatio > 1)
        minRatio = 1;
    float minCount = chapter4Img.Rows * chapter4Img.Cols * minRatio;// minimum pixel count a bin must exceed
    for (int i = 0; i < dhData.Length; i++)
    {
        if (dhData[i] == null)
        {
            minMaxRange[i].Max = minMaxRange[i].Min = -1f;// marker: this channel has no histogram data
            continue;
        }
        float[] bins = dhData[i].GetBinValues();// hoisted: GetBinValues() was called on every loop iteration
        // lowest bin whose count exceeds the threshold
        minMaxRange[i].Min = 0f;
        for (; minMaxRange[i].Min < bins.Length; minMaxRange[i].Min += 1)
        {
            if (bins[(int)minMaxRange[i].Min] > minCount)
                break;
        }
        // highest bin whose count exceeds the threshold
        minMaxRange[i].Max = bins.Length - 1;
        for (; minMaxRange[i].Max >= 0; minMaxRange[i].Max -= 1)
        {
            if (bins[(int)minMaxRange[i].Max] > minCount)
                break;
        }
    }
    // Build one 256-entry stretch table per channel.
    var lut = new Image<Bgr, byte>(256, 1);
    List<byte[]> arrayData = new List<byte[]>();
    arrayData.Add(new byte[256]);
    arrayData.Add(new byte[256]);
    arrayData.Add(new byte[256]);
    int channelCount = chapter4Img.NumberOfChannels == 1 ? 1 : 3;
    for (int index = 0; index < channelCount; index++)
    {
        for (int i = 0; i < 256; i++)
        {
            if (i < minMaxRange[index].Min)
                arrayData[index][i] = 0;
            else if (i > minMaxRange[index].Max)// FIX: original compared every channel against channel 0's Max
                arrayData[index][i] = 255;
            else
                // FIX: original denominator used channel 0's Min for all channels
                arrayData[index][i] = (byte)Math.Round(255.0 * (i - minMaxRange[index].Min) / (minMaxRange[index].Max - minMaxRange[index].Min));
        }
    }
    for (int i = 0; i < 256; i++)
    {
        for (int j = 0; j < 3; j++)
        {
            lut.Data[0, i, j] = arrayData[j][i];
        }
    }
    Mat resultImg = new Mat();
    // Both the single- and multi-channel cases apply the blue-channel (channel 0) LUT here,
    // so the two original identical branches are collapsed into one call.
    CvInvoke.LUT(chapter4Img, lut.Split()[0].Mat, resultImg);
    CvInvoke.NamedWindow("Based on One Channel (Blue)", NamedWindowType.Normal);
    CvInvoke.Imshow("Based on One Channel (Blue)", resultImg);
    // Second variant: one shared min/max range for all channels.
    arrayData.Add(new byte[256]);// one extra table
    int newMinValue = Math.Min((int)Math.Min(minMaxRange[0].Min, minMaxRange[1].Min), (int)minMaxRange[2].Min);
    int newMaxValue = Math.Max((int)Math.Max(minMaxRange[0].Max, minMaxRange[1].Max), (int)minMaxRange[2].Max);
    for (int i = 0; i < 256; i++)
    {
        if (i < newMinValue)
            arrayData[3][i] = 0;
        else if (i > newMaxValue)
            arrayData[3][i] = 255;
        else
            arrayData[3][i] = (byte)((255.0 * (i - newMinValue) / (newMaxValue - newMinValue)));
    }
    var newLut = new Mat(1, 256, DepthType.Cv8U, 1);
    newLut.SetTo(arrayData[3]);
    if (chapter4Img.NumberOfChannels == 3)
    {
        CvInvoke.LUT(chapter4Img, newLut, resultImg);
        CvInvoke.NamedWindow("Based on BGR three Channels", NamedWindowType.KeepRatio);
        CvInvoke.Imshow("Based on BGR three Channels", resultImg);
    }
    else
    {
        CvInvoke.LUT(chapter4Img, lut.Split()[0].Mat, resultImg);
        CvInvoke.NamedWindow("Based on One Channel", NamedWindowType.KeepRatio);
        CvInvoke.Imshow("Based on One Channel", resultImg);
    }
}
// Histogram equalization using the OpenCV (EmguCV) built-in EqualizeHist.
// Color images are equalized per BGR channel, then the channels are recombined.
private void button40_Click(object sender, EventArgs e)
{
    if (chapter4Img.IsEmpty)
        return;
    if (chapter4Img.NumberOfChannels == 3)
    {
        // equalize each channel independently (B, then G, then R)
        var channels = chapter4Img.ToImage<Bgr, byte>().Split();
        foreach (var channel in channels)
            CvInvoke.EqualizeHist(channel, channel);
        // recombine the three single-channel results into one color image
        var merged = chapter4Img.ToImage<Bgr, byte>();
        for (int row = 0; row < merged.Rows; row++)
        {
            for (int col = 0; col < merged.Cols; col++)
            {
                merged.Data[row, col, 0] = channels[0].Data[row, col, 0];
                merged.Data[row, col, 1] = channels[1].Data[row, col, 0];
                merged.Data[row, col, 2] = channels[2].Data[row, col, 0];
            }
        }
        CvInvoke.NamedWindow("Opencv EqualizeHist", NamedWindowType.KeepRatio);
        CvInvoke.Imshow("Opencv EqualizeHist", merged);
    }
    else
    {
        // single-channel image: equalize a copy in place
        var equalized = chapter4Img.Clone();
        CvInvoke.EqualizeHist(equalized, equalized);
        CvInvoke.NamedWindow("Opencv EqualizeHist", NamedWindowType.KeepRatio);
        CvInvoke.Imshow("Opencv EqualizeHist", equalized);
    }
}
这里说明一下,中间的两幅图像是使用直方图伸展后得到的。上图是分别对BGR三个通道进行伸展后合并得到的彩色图像,下图是对BGR三个通道进行伸展时,三个通道统一采用Blue通道直方图伸展时使用的参数;
最右边的是使用Opencv(EMGucv)自带直方图函数处理的结果,处理时分别对BGR三个通道的图像进行均衡再合并得到最终图像。
(2)反向投影直方图检测特定图像内容
part1 灰度图像
//反向投影直方图,基本原理是根据ROI图像直方图信息,获得原始图像中的像素点对应该直方图归一化后的概率,即概率越高,说明该点属于ROI的概率越大。代码中用到了三种,前两种是自己写的,最后一个使用了DenseHistogram类中的反向投影函数。从结果来看,DenseHistogram的方法最优
// Histogram back-projection on a grayscale image. Each pixel of the source image is
// replaced by the value the ROI histogram assigns to that pixel's intensity: the higher
// the (normalized) bin value, the more likely the pixel belongs to ROI-like content.
// Three variants are shown; the first two are hand-written, the last uses
// DenseHistogram.BackProject, which gives the best result.
private void button41_Click(object sender, EventArgs e)
{
if (chapter4Img.IsEmpty)
return;
Mat img = new Mat();
if (chapter4Img.NumberOfChannels != 1)
CvInvoke.CvtColor(chapter4Img, img, ColorConversion.Bgr2Gray);//multi-channel input: convert to grayscale first
else
img = chapter4Img.Clone();
var imgROI = new Mat(img, new Rectangle(100,100,200,200));
CvInvoke.Imshow("ROI", imgROI);//the ROI image
var hist = getDenseHistogramDataOfImage(imgROI);//histogram of the ROI region
//for a grayscale image the returned DenseHistogram array holds exactly one object
//variant 1: replace each pixel directly with the raw bin count of its intensity
//NOTE(review): the cast to byte wraps for bin counts > 255 — presumably acceptable for this demo, confirm
var resultimg2 = img.ToImage<Gray, byte>();
for(int i=0;i<resultimg2.Rows;i++)
for(int j=0;j<resultimg2.Cols;j++)
{
resultimg2.Data[i, j, 0] = (byte)(hist[0].GetBinValues()[resultimg2.Data[i, j, 0]]);
}
CvInvoke.NamedWindow("backByDIY1", NamedWindowType.KeepRatio);
CvInvoke.Imshow("backByDIY1", resultimg2);
//variant 2: normalize the 1-D ROI histogram first, repeat the substitution, and scale
//the result by 255 so the output range stays 0-255
double[] aaa = new double[256];
for(int i=0;i<256;i++)
{
aaa[i] = hist[0].GetBinValues()[i];
}
var histData = new Mat(1, 256, DepthType.Cv64F, 1);
histData.SetTo(aaa);
CvInvoke.Normalize(histData, histData, 1.0);//normalize (NOTE(review): default norm type — confirm L2 vs L1 is intended)
var normalizedData = histData.ToImage<Gray, double>();
var resultimg = img.ToImage<Gray, byte>();
for(int i=0;i<resultimg.Rows;i++)
for(int j=0;j<resultimg.Cols;j++)
{
resultimg.Data[i, j, 0] = (byte)(255.0 * normalizedData.Data[0, resultimg.Data[i, j, 0], 0]);
}
CvInvoke.NamedWindow("backByDIY2", NamedWindowType.KeepRatio);
CvInvoke.Imshow("backByDIY2", resultimg);
//variant 3: use DenseHistogram's built-in BackProject method directly
var resultImg = hist[0].BackProject<byte>(new Image<Gray, byte>[] { img.ToImage<Gray, byte>() });
CvInvoke.NamedWindow("backByDenseHistogram", NamedWindowType.KeepRatio);
CvInvoke.Imshow("backByDenseHistogram", resultImg);
}
part2 RGB彩色图像
//基于RGB三通道的彩色图像反向投影直方图,分别对三个通道进行search,最后整合成彩色图像
//需要注意的是,这里的彩色图像是最后根据ROi直方图反向投影得到,所以和原始彩色图像的颜色会有所不同
// Histogram back-projection for a BGR color image: back-project the ROI histogram of
// each channel separately, then recombine the three results into one color image.
// Note the recombined colors differ from the original image, since every channel is a
// back-projection probability map rather than the original intensity.
private void button42_Click(object sender, EventArgs e)
{
    if (chapter4Img.IsEmpty)
        return;
    if (chapter4Img.NumberOfChannels != 3)// only BGR three-channel images are handled here
        return;
    var channels = chapter4Img.ToImage<Bgr, byte>().Split();
    var roi = new Mat(chapter4Img, new Rectangle(240, 130, 110, 110));
    CvInvoke.NamedWindow("ROI", NamedWindowType.KeepRatio);
    CvInvoke.Imshow("ROI", roi);
    var hist = getDenseHistogramDataOfImage(roi);// yields one DenseHistogram per channel
    var backProjB = hist[0].BackProject<byte>(new Image<Gray, byte>[] { channels[0] });
    var backProjG = hist[1].BackProject<byte>(new Image<Gray, byte>[] { channels[1] });
    var backProjR = hist[2].BackProject<byte>(new Image<Gray, byte>[] { channels[2] });
    CvInvoke.NamedWindow("R", NamedWindowType.KeepRatio);
    CvInvoke.Imshow("R", backProjR);
    CvInvoke.NamedWindow("G", NamedWindowType.KeepRatio);
    CvInvoke.Imshow("G", backProjG);
    CvInvoke.NamedWindow("B", NamedWindowType.KeepRatio);
    CvInvoke.Imshow("B", backProjB);
    // stitch the three single-channel maps back into a BGR image
    var combined = chapter4Img.ToImage<Bgr, byte>();
    for (int row = 0; row < combined.Rows; row++)
    {
        for (int col = 0; col < combined.Cols; col++)
        {
            combined.Data[row, col, 0] = backProjB.Data[row, col, 0];
            combined.Data[row, col, 1] = backProjG.Data[row, col, 0];
            combined.Data[row, col, 2] = backProjR.Data[row, col, 0];
        }
    }
    CvInvoke.NamedWindow("backByDenseHistogram", NamedWindowType.KeepRatio);
    CvInvoke.Imshow("backByDenseHistogram", combined);
}
(3)基于图像HSV空间色调分量的均值平移算法查找目标
本例中,首先读取一个狒狒图像,设置ROI为狒狒脸部。然后,读取需要检测的图像,并查找该图像中狒狒的脸部区域
// Mean-shift target search on the hue component of HSV space.
// A reference (baboon) image supplies a face ROI; its hue histogram is back-projected
// onto a second image, and mean shift moves the search window to the densest region.
private void button43_Click(object sender, EventArgs e)
{
    Mat baseMat = new Mat();
    Mat detectedMat = new Mat();
    if (chapter1OFD.ShowDialog() == DialogResult.OK)
        baseMat = CvInvoke.Imread(chapter1OFD.FileName, LoadImageType.AnyColor | LoadImageType.AnyDepth);// FIX: was AnyColor | AnyColor (flag duplicated)
    if (baseMat.IsEmpty || baseMat.NumberOfChannels != 3)// FIX: short-circuit || instead of bitwise |
        return;
    var rect = new Rectangle(113, 252, 35, 40);// the baboon face in the reference image
    var tempImg = baseMat.Clone();
    CvInvoke.NamedWindow("baseImage", NamedWindowType.KeepRatio);
    CvInvoke.Rectangle(tempImg, rect, new MCvScalar(0, 0, 255), 2);
    CvInvoke.Imshow("baseImage", tempImg);// show the reference image with the ROI outlined
    var imgROI = new Mat(baseMat, rect);// face ROI
    // build a mask that suppresses low-saturation pixels, whose hue is unreliable
    int minSat = 65;
    var hsvROI = new Mat();
    CvInvoke.CvtColor(imgROI, hsvROI, ColorConversion.Bgr2Hsv);
    var hsvROIImg = hsvROI.ToImage<Hsv, byte>();
    var hsvChannels = hsvROIImg.Split();// hoisted: Split() allocates fresh images on every call
    var mask = new Mat();
    CvInvoke.Threshold(hsvChannels[1], mask, minSat, 255, ThresholdType.Binary);
    // hue histogram of the ROI; pixels where the mask is zero are EXCLUDED from the count
    DenseHistogram hist = new DenseHistogram(181, new RangeF(0f, 180.1f));
    hist.Calculate<byte>(new Image<Gray, byte>[] { hsvChannels[0] }, false, mask.ToImage<Gray, byte>());
    // load the image to search and back-project the histogram onto its hue channel
    if (chapter1OFD.ShowDialog() == DialogResult.OK)
        detectedMat = CvInvoke.Imread(chapter1OFD.FileName, LoadImageType.AnyColor | LoadImageType.AnyDepth);// FIX: same duplicated-flag bug
    if (detectedMat.IsEmpty || detectedMat.NumberOfChannels != 3)
        return;
    var hsvMat = new Mat();
    CvInvoke.CvtColor(detectedMat, hsvMat, ColorConversion.Bgr2Hsv);// HSV data of the search image
    var resultImg = hist.BackProject<byte>(new Image<Gray, byte>[] { hsvMat.ToImage<Hsv, byte>().Split()[0] });
    var criteria = new MCvTermCriteria(1000, 0.01);
    CvInvoke.MeanShift(resultImg, ref rect, criteria);// mean shift moves rect onto the face region
    tempImg = detectedMat.Clone();
    CvInvoke.Rectangle(tempImg, rect, new MCvScalar(255, 0, 0), 2);
    CvInvoke.NamedWindow("detectedImage", NamedWindowType.KeepRatio);
    CvInvoke.Imshow("detectedImage", tempImg);
}
看结果还是可以的~
(4)基于直方图的图像相似度比较,这里用到了CvInvoke.CompareHist这个函数
// Compare two user-selected images by histogram similarity (see ImageComparator) and
// append the score to the text box. Both images must be same-size, same-depth BGR.
private void button44_Click(object sender, EventArgs e)
{
    Mat baseImage = new Mat();
    Mat inputImage = new Mat();
    string baseImageName = "", inputImageName = "";
    if (chapter1OFD.ShowDialog() == DialogResult.OK)
    {
        baseImage = CvInvoke.Imread(chapter1OFD.FileName, LoadImageType.AnyColor | LoadImageType.AnyDepth);
        baseImageName = chapter1OFD.SafeFileName;
    }
    if (chapter1OFD.ShowDialog() == DialogResult.OK)
    {
        inputImage = CvInvoke.Imread(chapter1OFD.FileName, LoadImageType.AnyColor | LoadImageType.AnyDepth);
        inputImageName = chapter1OFD.SafeFileName;
    }
    // FIX: use short-circuit || so later checks are skipped once one fails
    if (baseImage.IsEmpty || inputImage.IsEmpty || baseImage.NumberOfChannels != 3 || inputImage.NumberOfChannels != 3 || baseImage.Size != inputImage.Size || baseImage.Depth != inputImage.Depth)
        return;
    ImageComparator ic = new ImageComparator();
    ic.setBaseImage(baseImage);
    ic.setInputImage(inputImage);
    ic.setNumberOfBins(8);
    double difference = ic.calculateDifference();// FIX: removed unused local `int ii = 0;`
    textBox3.Text += baseImageName + " VS " + inputImageName + " : " + difference + Environment.NewLine;
}
//定义一个用于基于直方图比较的图像相似度计算类
// Computes a histogram-based similarity score between two images.
// Assumes (by design, to keep the code short) that both images are 3-channel BGR.
public class ImageComparator
{
    private Mat baseImg;   // reference image
    private Mat inputImg;  // image compared against the reference
    int nBins;             // number of histogram bins per color channel

    public ImageComparator()
    {
        nBins = 8;// default bin count
    }

    // Sets the number of bins used per channel when building the histograms.
    public void setNumberOfBins(int n)
    {
        nBins = n;
    }

    public void setBaseImage(Mat img)
    {
        baseImg = img;
    }

    public void setInputImage(Mat img)
    {
        inputImg = img;
    }

    // Builds per-channel histograms for both images and returns the Bhattacharyya
    // distance between them (0 = identical distributions; larger = more different).
    public double calculateDifference()
    {
        // FIX: hoisted — the original re-ran ToImage<Bgr, byte>().Split() on every loop
        // iteration, converting and splitting each image three times instead of once.
        var baseChannels = baseImg.ToImage<Bgr, byte>().Split();
        var inputChannels = inputImg.ToImage<Bgr, byte>().Split();
        DenseHistogram[] histogramOfBaseImage = new DenseHistogram[3];
        DenseHistogram[] histogramOfInputImage = new DenseHistogram[3];
        for (int i = 0; i < 3; i++)
        {
            histogramOfBaseImage[i] = new DenseHistogram(nBins, new RangeF(0f, 255.1f));
            histogramOfBaseImage[i].Calculate<byte>(new Image<Gray, byte>[] { baseChannels[i] }, false, null);
            histogramOfInputImage[i] = new DenseHistogram(nBins, new RangeF(0f, 255.1f));
            histogramOfInputImage[i].Calculate<byte>(new Image<Gray, byte>[] { inputChannels[i] }, false, null);
        }
        // pack the per-channel bin counts into two 3-channel rows for CompareHist
        Image<Bgr, float> histOfBaseImg = new Image<Bgr, float>(nBins, 1);
        Image<Bgr, float> histOfInputImg = new Image<Bgr, float>(nBins, 1);
        for (int i = 0; i < 3; i++)
            for (int j = 0; j < nBins; j++)
            {
                histOfBaseImg.Data[0, j, i] = (float)histogramOfBaseImage[i].GetBinValues()[j];
                histOfInputImg.Data[0, j, i] = (float)histogramOfInputImage[i].GetBinValues()[j];
            }
        return CvInvoke.CompareHist(histOfBaseImg, histOfInputImg, HistogramCompMethod.Bhattacharyya);// other comparison methods work here too
    }
}
(5)基于积分图像的自动化阈值分割
// Adaptive (local-mean) thresholding implemented with an integral image, compared
// against OpenCV's built-in AdaptiveThreshold. Border pixels are left unprocessed.
private void button45_Click(object sender, EventArgs e)
{
if (chapter4Img.IsEmpty)
return;
Mat img = new Mat();
if (chapter4Img.NumberOfChannels == 3)
CvInvoke.CvtColor(chapter4Img, img, ColorConversion.Bgr2Gray);
else
img = chapter4Img.Clone();
Mat integralImg = new Mat();
CvInvoke.Integral(img, integralImg);
imageBox17.Image = integralImg;//the integral image is (rows+1, cols+1); with this convention SAT(0,x)=SAT(x,0)=SAT(0,0)=0
var imgData = img.ToImage<Gray, byte>();
//NOTE(review): Integral on an 8U image produces an integer sum image; reading it via
//ToImage<Gray, float> relies on EmguCV converting the values — confirm no precision loss for large images
var integralImgData = integralImg.ToImage<Gray, float>();
int threshold = 10;//offset subtracted from the local mean
int halfSize = 10;//half the side of the averaging window
int blockSize = 2 * halfSize + 1;//full window side length
//for each interior pixel: sum over the window via 4 integral-image lookups, then
//binarize against (local mean - threshold); border pixels are skipped
for(int i=halfSize+1;i<img.Rows-halfSize;i++)
for(int j=halfSize+1;j<img.Cols-halfSize;j++)
{
int sum = (int)(integralImgData.Data[i + halfSize + 1, j + halfSize + 1, 0] + integralImgData.Data[i - halfSize, j - halfSize, 0] - integralImgData.Data[i + halfSize + 1, j - halfSize, 0] - integralImgData.Data[i - halfSize, j + halfSize + 1, 0]);
if (imgData.Data[i, j, 0] < (sum/(blockSize*blockSize) - threshold))
imgData.Data[i, j, 0] = 0;
else
imgData.Data[i, j, 0] = 255;
}
imageBox17.Image = imgData;
//the code below uses OpenCV's built-in adaptive thresholding for comparison
Mat opencvMat = new Mat();
CvInvoke.AdaptiveThreshold(img, opencvMat, 255, AdaptiveThresholdType.MeanC, ThresholdType.Binary, blockSize, threshold);
CvInvoke.NamedWindow("OpenCV-AdaptiveThreshold", NamedWindowType.KeepRatio);
CvInvoke.Imshow("OpenCV-AdaptiveThreshold", opencvMat);
}
右边是直接采用opencv的自动化阈值分割方法实现~在自定义方法中,并没有对边缘数据进行处理~