Original post: http://blog.youkuaiyun.com/yeyang911/article/details/18256393
In my own tests the results were not great.
The function implements:
J = low_out + (high_out - low_out) * ((I - low_in) / (high_in - low_in)) ^ gamma
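For example, with the parameters used in the test code below (low_in = 0, high_in = 0.5, low_out = 0.5, high_out = 1, gamma = 1), a normalized input pixel I = 0.25 maps to J = 0.5 + 0.5 * (0.25 / 0.5) ^ 1 = 0.75, and any input at or above high_in saturates at J = 1.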
// Legacy OpenCV C-API implementation (IplImage), written against OpenCV 2.x.
#include <opencv2/opencv.hpp>   // pulls in the C API (IplImage) and the C++ Mat API
#include <cmath>

using namespace cv;

IplImage* ImageAdjust(IplImage *src, IplImage *dst,
                      double low_in,  double high_in,
                      double low_out, double high_out, double gamma)
{
    // Convert the normalized [0,1] bounds to the 8-bit range.
    double low2    = low_in   * 255;
    double high2   = high_in  * 255;
    double bottom2 = low_out  * 255;
    double top2    = high_out * 255;
    double err_in  = high2 - low2;
    double err_out = top2  - bottom2;
    int x, y;
    double val0, val1, val2;

    // Intensity transform, applied to each of the three channels.
    for (y = 0; y < src->height; y++) {
        for (x = 0; x < src->width; x++) {
            val0 = ((uchar*)(src->imageData + src->widthStep*y))[x*src->nChannels];
            val0 = pow((val0 - low2)/err_in, gamma)*err_out + bottom2;
            if (val0 > 255) val0 = 255;
            if (val0 < 0)   val0 = 0;
            ((uchar*)(dst->imageData + dst->widthStep*y))[x*src->nChannels] = (uchar)val0;

            val1 = ((uchar*)(src->imageData + src->widthStep*y))[x*src->nChannels + 1];
            val1 = pow((val1 - low2)/err_in, gamma)*err_out + bottom2;
            if (val1 > 255) val1 = 255;
            if (val1 < 0)   val1 = 0;
            ((uchar*)(dst->imageData + dst->widthStep*y))[x*src->nChannels + 1] = (uchar)val1;

            val2 = ((uchar*)(src->imageData + src->widthStep*y))[x*src->nChannels + 2];
            val2 = pow((val2 - low2)/err_in, gamma)*err_out + bottom2;
            if (val2 > 255) val2 = 255;
            if (val2 < 0)   val2 = 0;   // clamp the result to the valid 8-bit range
            ((uchar*)(dst->imageData + dst->widthStep*y))[x*src->nChannels + 2] = (uchar)val2;
        }
    }
    return dst;   // the original returned 0, which contradicts the IplImage* return type
}
Test code:
int main()
{
    IplImage *src = cvLoadImage("d:/111.JPG", 1);
    CvSize a;
    a.width  = src->width;
    a.height = src->height;
    IplImage *dst = cvCreateImage(a, 8, 3);

    // Map the input range [0, 0.5] to the output range [0.5, 1] with gamma = 1.
    ImageAdjust(src, dst,
                0,   0.5,
                0.5, 1,   1);

    Mat c = cvarrToMat(dst);   // safer than the implicit "Mat c = dst;", which only works in OpenCV 2.x
    imshow("ss", c);
    waitKey();

    cvReleaseImage(&src);
    cvReleaseImage(&dst);
    return 0;
}
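As a side note, because the images are 8-bit, the same per-pixel mapping can also be expressed as a 256-entry lookup table and applied with cv::LUT, so pow is called 256 times instead of once per pixel. The following is only a minimal sketch with the C++ API; the function name imadjustLUT is mine, and it assumes an 8-bit input image (any number of channels):

#include <opencv2/opencv.hpp>
#include <algorithm>
#include <cmath>

// Sketch: precompute the imadjust curve for the 256 possible input values,
// then let cv::LUT apply it to every pixel (and every channel) at once.
cv::Mat imadjustLUT(const cv::Mat& src,
                    double low_in, double high_in,
                    double low_out, double high_out, double gamma)
{
    cv::Mat lut(1, 256, CV_8U);
    for (int i = 0; i < 256; ++i)
    {
        double x = (i / 255.0 - low_in) / (high_in - low_in);
        x = std::min(std::max(x, 0.0), 1.0);   // clip to the input window
        double y = low_out + (high_out - low_out) * std::pow(x, gamma);
        lut.at<uchar>(i) = cv::saturate_cast<uchar>(y * 255.0);
    }
    cv::Mat dst;
    cv::LUT(src, lut, dst);   // the same 256-entry table is applied to every channel
    return dst;
}

Calling imadjustLUT(cvarrToMat(src), 0, 0.5, 0.5, 1, 1) should give the same result as the ImageAdjust call above.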
(Figures omitted: result image and original image.)
Emgu CV version
private void imageAdjust(Image<Bgr, byte> src, Image<Bgr, byte> dst,
                         double low_in,  double high_in,
                         double low_out, double high_out, double gamma)
{
    // Convert the normalized [0,1] bounds to the 8-bit range.
    double low2    = low_in   * 255;
    double high2   = high_in  * 255;
    double bottom2 = low_out  * 255;
    double top2    = high_out * 255;
    double err_in  = high2 - low2;
    double err_out = top2  - bottom2;
    int x, y;
    double val0, val1, val2;

    // Intensity transform, one channel at a time.
    for (y = 0; y < src.Height; y++)
    {
        for (x = 0; x < src.Width; x++)
        {
            val0 = src.Data[y, x, 0];
            val0 = Math.Pow((val0 - low2) / err_in, gamma) * err_out + bottom2;
            if (val0 > 255) val0 = 255;
            if (val0 < 0) val0 = 0;
            dst.Data[y, x, 0] = (byte)val0;

            val1 = src.Data[y, x, 1];
            val1 = Math.Pow((val1 - low2) / err_in, gamma) * err_out + bottom2;
            if (val1 > 255) val1 = 255;
            if (val1 < 0) val1 = 0;
            dst.Data[y, x, 1] = (byte)val1;

            val2 = src.Data[y, x, 2];
            val2 = Math.Pow((val2 - low2) / err_in, gamma) * err_out + bottom2;
            if (val2 > 255) val2 = 255;
            if (val2 < 0) val2 = 0;   // clamp the result to the valid 8-bit range
            dst.Data[y, x, 2] = (byte)val2;
        }
    }
}
The code above is slow on large images, so here is an optimized version.
The code is as follows:
private void imadjust(Image<Gray, byte> src)
{
    double minV, maxV;
    int[] minP = new int[2];
    int[] maxP = new int[2];
    CvInvoke.MinMaxIdx(src, out minV, out maxV, minP, maxP);

    Mat m = src.Mat;
    m.ConvertTo(m, Emgu.CV.CvEnum.DepthType.Cv32F);

    Mat n = new Mat(m.Size, Emgu.CV.CvEnum.DepthType.Cv32F, 1);
    MCvScalar p = new MCvScalar();

    p.V0 = minV;
    n.SetTo(p);                       // fill the helper matrix with minV
    CvInvoke.Subtract(m, n, m);       // shift so the minimum value becomes 0

    p.V0 = 1.0 / (maxV - minV);
    n.SetTo(p);                       // fill the helper matrix with the scale factor
    Mat dst = new Mat(m.Size, Emgu.CV.CvEnum.DepthType.Cv32F, 1);
    CvInvoke.Multiply(m, n, dst);     // normalize to [0,1]

    p.V0 = 255;
    n.SetTo(p);                       // fill the helper matrix with 255
    CvInvoke.Multiply(dst, n, dst);   // stretch to [0,255]

    pictureBox1.Image = dst.ToImage<Gray, byte>().ToBitmap();   // show the result in the PictureBox
}
Both versions use a vectorized (matrix-based) programming style, so performance should not suffer. Note that this is only a simple implementation: it assumes the output range runs from 0 to 255 and that gamma is 1. For other cases you will have to adapt the code yourself.
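For reference, here is a hedged sketch of such a more general variant using the OpenCV C++ API: it handles arbitrary in/out ranges and gamma with whole-matrix operations only (convertTo, min/max, pow). The function name imadjustVec is mine, and the code assumes an 8-bit input:

#include <opencv2/opencv.hpp>

// Sketch: fully vectorized imadjust with arbitrary ranges and gamma.
cv::Mat imadjustVec(const cv::Mat& src,
                    double low_in, double high_in,
                    double low_out, double high_out, double gamma)
{
    cv::Mat f;
    // (src/255 - low_in) / (high_in - low_in), done in one convertTo pass.
    src.convertTo(f, CV_32F, 1.0 / (255.0 * (high_in - low_in)),
                  -low_in / (high_in - low_in));
    f = cv::max(f, 0.0);   // clip to the input window
    f = cv::min(f, 1.0);
    cv::pow(f, gamma, f);  // apply the gamma curve

    cv::Mat dst;
    // Map [0,1] to [low_out, high_out] and convert back to 8-bit.
    f.convertTo(dst, CV_8U, 255.0 * (high_out - low_out), 255.0 * low_out);
    return dst;
}

With low_in = 0, high_in = 1, low_out = 0, high_out = 1 and gamma = 1 this reduces to a plain copy; plugging in the min/max values found by MinMaxIdx reproduces the stretch in the optimized Emgu CV version above.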
Here is another version, which only works on grayscale images.
Reference: http://stackoverflow.com/questions/31647274/is-there-any-function-equivalent-to-matlabs-imadjust-in-opencv-with-c
That page also has another variant that is worth testing.
#include <opencv2/opencv.hpp>
#include <vector>
#include <algorithm>

using namespace std;
using namespace cv;

/*
   src and dst are grayscale, 8-bit images;
   Default input value:
       [low, high]   = [0,1];  X-Direction
       [bottom, top] = [0,1];  Y-Direction
       gamma;
   if adjust successfully, return 0, otherwise, return non-zero.
*/

void imadjust(const Mat1b& src, Mat1b& dst, int tol = 1,
              Vec2i in = Vec2i(0, 255), Vec2i out = Vec2i(0, 255))
{
    // src : input CV_8UC1 image
    // dst : output CV_8UC1 image
    // tol : tolerance, from 0 to 100.
    // in  : src image bounds
    // out : dst image bounds

    dst = src.clone();

    tol = max(0, min(100, tol));

    if (tol > 0)
    {
        // Compute in and out limits

        // Histogram
        vector<int> hist(256, 0);
        for (int r = 0; r < src.rows; ++r) {
            for (int c = 0; c < src.cols; ++c) {
                hist[src(r, c)]++;
            }
        }

        // Cumulative histogram
        vector<int> cum = hist;
        for (int i = 1; i < hist.size(); ++i) {
            cum[i] = cum[i - 1] + hist[i];
        }

        // Compute bounds
        int total = src.rows * src.cols;
        int low_bound = total * tol / 100;
        int upp_bound = total * (100 - tol) / 100;
        in[0] = distance(cum.begin(), lower_bound(cum.begin(), cum.end(), low_bound));
        in[1] = distance(cum.begin(), lower_bound(cum.begin(), cum.end(), upp_bound));
    }

    // Stretching
    float scale = float(out[1] - out[0]) / float(in[1] - in[0]);
    for (int r = 0; r < dst.rows; ++r) {
        for (int c = 0; c < dst.cols; ++c) {
            int vs = max(src(r, c) - in[0], 0);
            int vd = min(int(vs * scale + 0.5f) + out[0], out[1]);
            dst(r, c) = saturate_cast<uchar>(vd);
        }
    }
}

int main()
{
    Mat3b img = imread("fish.png");

    Mat1b gray;
    cvtColor(img, gray, COLOR_BGR2GRAY);   // imread loads BGR, so use COLOR_BGR2GRAY

    Mat1b adjusted;
    imadjust(gray, adjusted);

    imshow("ss", adjusted);
    waitKey();

    // int low_in, high_in, low_out, high_out
    // imadjust(gray, adjusted, 0, Vec2i(low_in, high_in), Vec2i(low_out, high_out));

    return 0;
}

//int main111(int argc, char** argv)
//{
//    IplImage *src = cvLoadImage("fish.png", 1);
//    CvSize a;
//    a.width = src->width;
//    a.height = src->height;
//    IplImage *dst = cvCreateImage(a, 8, 3);
//    ImageAdjust(src, dst,
//                0, 0.5,
//                0.5, 1, 1);
//    Mat gray_image = cvarrToMat(dst);
//    imshow("ss", gray_image);
//    waitKey();
//    return 0;
//}