// wu1  (stray paste residue — commented out so the file can compile)

//wenjing Background subtraction demo (MOG2)
#if(1)
#include<highgui.hpp>
#include<imgproc.hpp>
#include<iostream>
#include<opencv2\video\background_segm.hpp>
using namespace cv;
using namespace std;


/*
 * Background subtraction demo: reads a video, runs the MOG2 background
 * subtractor, cleans the foreground mask with erode/dilate, and draws
 * bounding boxes around medium-sized moving blobs.
 * Returns 0 on success, -1 if the video cannot be opened.
 */
int main()
{

	VideoCapture capture;
	capture.open("D:\\vvoo1\\mov.avi");

	if (!capture.isOpened())
	{
		cout << "读取视频失败!" << endl;
		return -1;
	}


	Mat image;       // current frame
	Mat background;  // background model estimated by MOG2
	Mat foreground;  // binary foreground mask

	Ptr<BackgroundSubtractorMOG2> mog = createBackgroundSubtractorMOG2();

	while (1){

		capture >> image;

		// FIX: stop on end-of-stream / decode failure. The original relied on
		// a frame counter and would pass an empty Mat to apply(), which crashes.
		if (image.empty())
			break;

		mog->apply(image, foreground, 0.01); // 0.01 = fixed learning rate
		mog->getBackgroundImage(background);

		// 3x3 erosion then dilation (default kernels) to remove speckle noise
		erode(foreground, foreground, Mat());
		dilate(foreground, foreground, Mat());

		imshow("video", image);
		imshow("foreground", foreground);
		imshow("background", background);

		// extract outer contours of the foreground mask
		vector<vector<Point>> contours;
		findContours(foreground, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
		for (size_t i = 0; i < contours.size(); i++)
		{
			drawContours(foreground, contours, (int)i, Scalar(255), 2);
			// keep only medium-sized blobs (filters out noise and huge regions)
			double area = contourArea(Mat(contours[i]));
			if (area>300 && area<2000)
			{
				Rect rect = boundingRect(Mat(contours[i]));
				rectangle(foreground, rect, Scalar(255), 2);
				rectangle(image, rect, Scalar(255, 0, 0), 2);
			}
		}
		imshow("image", image);

		waitKey(33); // ~30 fps playback
	}


	return 0;
}
#endif
#if(0)
#include <opencv2/opencv.hpp>  
#include<iostream>
using namespace std;
using namespace cv;
void colorReduce(Mat& inputImage, Mat& outputImage, int div);

/*
 * Color-reduction demo: loads an image, quantizes its colors with
 * colorReduce(div = 128), reports the elapsed time, and shows both images.
 * Returns 0 on success, -1 if the image cannot be loaded.
 */
int main()
{
	Mat srcImage = imread("D:\\vvoo\\lena.jpg");
	// FIX: the original never checked the load result; an empty Mat
	// crashes inside imshow.
	if (srcImage.empty())
	{
		cout << "读取图像失败!" << endl;
		return -1;
	}
	imshow("原始图像", srcImage);

	Mat dstImage;
	dstImage.create(srcImage.rows, srcImage.cols, srcImage.type()); // same size/type as the source

	// time the color-space reduction
	double time0 = static_cast<double>(getTickCount());
	colorReduce(srcImage, dstImage, 128);
	time0 = ((double)getTickCount() - time0) / getTickFrequency();
	cout << "此方法运行时间为: " << time0 << "秒" << endl;

	imshow("效果图", dstImage);
	waitKey(0);
	return 0;
}
//void colorReduce(Mat&inputImage, Mat&outputImage, int div)
//{
//	outputImage = inputImage.clone();
//	int rows = outputImage.rows;
//	int cols = outputImage.cols;
//	for (int i = 0; i < rows; i++)
//	{
//		for (int j = 0; j < cols; j++)
//		{
//			outputImage.at<Vec3b>(i, j)[0] = outputImage.at<Vec3b>(i, j)[0] / div*div + div / 2;
//			outputImage.at<Vec3b>(i, j)[1] = outputImage.at<Vec3b>(i, j)[1] / div*div + div / 2;
//			outputImage.at<Vec3b>(i, j)[2] = outputImage.at<Vec3b>(i, j)[2] / div*div + div / 2;
//		}
//	}
//
//}



/*
 * Reduce the color space of a BGR image: every channel value is snapped to
 * the center of its div-sized bucket (v -> v / div * div + div / 2).
 * inputImage is copied; the quantized result is written to outputImage.
 */
void colorReduce(Mat& inputImage, Mat& outputImage, int div)
{
	outputImage = inputImage.clone();
	const int half = div / 2;           // bucket center offset
	Mat_<Vec3b> pixels = outputImage;   // typed view sharing outputImage's data
	for (Vec3b& px : pixels)
	{
		px[0] = px[0] / div * div + half;
		px[1] = px[1] / div * div + half;
		px[2] = px[2] / div * div + half;
	}
}



//void colorReduce(Mat& inputImage, Mat& outputImage, int div)
//{
//	outputImage = inputImage.clone();
//	int rows = outputImage.rows;
//	int cols = outputImage.cols*outputImage.channels();
//	for (int i = 0; i < rows; i++)
//	{
//		/*uchar* data = inputImage.ptr<uchar>(i);*/
//		uchar* dataout = outputImage.ptr<uchar>(i);
//		for (int j = 0; j < cols; j++)
//		{
//			dataout[j] = dataout[j] / div*div + div / 2;
//		}
//	}
//
//}
#endif



/************************************************************************/
/*参考文献
基于彩色直方图和Kalman滤波的视频跟踪算法
Version 0.9
Written by Y. B. Mao
Visual Information Processing and Analysis Group (ViPAG),
Nanjing University of Sci. & Tech.
www.open-image.org
Feb. 9, 2006
All rights reserved.

Kalman滤波算法的详细描述,参见:
[1] 徐士良. C常用算法程序集. 清华大学出版社. 1994.

Mean shift跟踪算法的详细描述,请参见:
[1] D. Comaniciu, V. Ramesh, P. Meer. Real-time tracking of non-rigid
objects using mean shift. Proc. Conf. Vision Pattern Rec., II: 142-149,
Hilton Head, SC, June 2000.
[2] Dorin Comaniciu, Visvanathan Ramesh, Peter Meer. Kernel-based object
tracking. IEEE Trans. on Pattern Analysis and Machine Intelligence.
Vol.25, No. 5, 2003, pp. 554-577.
[3] Huimin QIAN, Yaobin MAO, Jason GENG, Zhiquan WANG. Object tracking with
self-updating tracking window. PAISI'2007.

[4]real-time Multiple Objects Tracking with Occlusion Handling in Dynamic Scenes

本代码只可用于非商业用途。如使用本代码请在论文中标注上述三篇参考文献。*/
/************************************************************************/

#include <cv.h>
#include <cxcore.h>
#include <highgui.h>
#include <math.h>
#include <iostream>
#include <vector>
#include<opencv2/opencv.hpp>
using namespace std;
using namespace cv;

// Pixel accessors for a 3-channel IplImage (BGR interleaved): row base address
// via widthStep, then 3 bytes per pixel.
#define B(image,x,y)     ((uchar*)(image->imageData + image->widthStep*(y)))[(x)*3]		// blue channel of pixel (x, y)
#define G(image,x,y)     ((uchar*)(image->imageData + image->widthStep*(y)))[(x)*3+1]	// green channel
#define R(image,x,y)     ((uchar*)(image->imageData + image->widthStep*(y)))[(x)*3+2]	// red channel
#define S(image,x,y)     ((uchar*)(image->imageData + image->widthStep*(y)))[(x)]	// single-channel pixel: row base address plus column x
#define  Num 10  // frame-difference interval (compare against the frame Num frames ago)
#define  T 40    // frame-difference threshold Tf
#define Re 30     // motion-history reset value (frames a pixel stays "moving")
#define ai 0.08   // background learning rate
#define CONTOUR_MAX_AREA 10000
#define CONTOUR_MIN_AREA 50

# define R_BIN      8  /* number of histogram bins for the red channel */
# define G_BIN      8  /* number of histogram bins for the green channel */
# define B_BIN      8  /* number of histogram bins for the blue channel */ 

# define R_SHIFT    5  /* right-shift amounts matching the bin counts above: */
# define G_SHIFT    5  /* log2(256 / 8) = 5 bits for each of R, G, B */
# define B_SHIFT    5  /* */


bool pause = false;//whether playback is paused
bool track = false;//whether tracking is active
IplImage *curframe = NULL;   // current frame (owned by the capture)
IplImage *pBackImg = NULL;   // background model image
IplImage *pFrontImg = NULL;  // binary foreground mask
IplImage *pTrackImg = NULL;  // frame annotated with the tracking box
unsigned char * img;//current frame repacked as a raw RGB byte buffer for the tracker math
int xin, yin;//tracking input: center point
int xout, yout;//tracking output: center point
int Wid, Hei;//image size
int WidIn, HeiIn;//input half width / half height
int WidOut, HeiOut;//output half width / half height

/*
Invert an n x n real matrix in place using full-pivot Gauss-Jordan elimination.
Input:
double * a:     the matrix, row-major; overwritten with its inverse on success
int n:          matrix dimension
Return value:
0 if the matrix is (numerically) singular — a is then left partially modified;
non-zero on success
*/
int brinv(double * a, int n)
{
	int i, j, k, l, u, v;
	double d, p;

	// Pivot bookkeeping (row/column swaps), unwound in reverse at the end.
	// std::vector replaces the original unchecked malloc/free pair and
	// cannot leak on the early (singular) return.
	std::vector<int> is(n), js(n);

	for (k = 0; k < n; k++)
	{
		/* pick the largest remaining element as the pivot */
		d = 0.0;
		for (i = k; i < n; i++)
		for (j = k; j < n; j++)
		{
			l = i*n + j;
			p = fabs(a[l]);
			if (p > d)
			{
				d = p; is[k] = i; js[k] = j;
			}
		}
		if (d + 1.0 == 1.0) /* pivot vanished: singular matrix */
			return(0);
		if (is[k] != k)     /* swap the pivot row into place */
		for (j = 0; j < n; j++)
		{
			u = k*n + j;
			v = is[k] * n + j;
			p = a[u]; a[u] = a[v]; a[v] = p;
		}
		if (js[k] != k)     /* swap the pivot column into place */
		for (i = 0; i < n; i++)
		{
			u = i*n + k;
			v = i*n + js[k];
			p = a[u]; a[u] = a[v]; a[v] = p;
		}
		l = k*n + k;
		a[l] = 1.0 / a[l];  /* invert the pivot element */
		for (j = 0; j < n; j++)  /* scale the pivot row */
		if (j != k)
		{
			u = k*n + j;
			a[u] = a[u] * a[l];
		}
		for (i = 0; i < n; i++)  /* eliminate the pivot column elsewhere */
		if (i != k)
		for (j = 0; j < n; j++)
		if (j != k)
		{
			u = i*n + j;
			a[u] = a[u] - a[i*n + k] * a[k*n + j];
		}
		for (i = 0; i < n; i++)
		if (i != k)
		{
			u = i*n + k;
			a[u] = -a[u] * a[l];
		}
	}
	for (k = n - 1; k >= 0; k--)  /* undo the swaps in reverse order */
	{
		if (js[k] != k)
		for (j = 0; j <= n - 1; j++)
		{
			u = k*n + j;
			v = js[k] * n + j;
			p = a[u]; a[u] = a[v]; a[v] = p;
		}
		if (is[k] != k)
		for (i = 0; i < n; i++)
		{
			u = i*n + k;
			v = i*n + is[k];
			p = a[u]; a[u] = a[v]; a[v] = p;
		}
	}

	return(1);
}


/*
Kernel-weighted color histogram of an image region.
Input:
int x0, y0:            center of the region
int Wx, Hy:            half width and half height of the region
unsigned char * image: image data, left-to-right then top-to-bottom,
                       channel order: RGB, RGB, ... (or YUV, YUV, ...)
int W, H:              image width and height
float * Kernel:        kernel weights, size (2*Wx+1)*(2*Hy+1)
float C_k:             sum of the kernel weights (normalization constant)
Output:
float * ColorHist:     histogram, bin index i = r*G_BIN*B_BIN + g*B_BIN + b
int bins:              number of bins, R_BIN*G_BIN*B_BIN (8*8*8 = 512 here)
*/
void CalcuColorHistogram(int x0, int y0, int Wx, int Hy, unsigned char * image, int W, int H, float * Kernel, float C_k,
	float * ColorHist, int bins)
{
	for (int bin = 0; bin < bins; bin++)
		ColorHist[bin] = 0.0;

	// Degenerate request (center outside the image, or non-positive half
	// sizes): leave the histogram all zero.
	if (x0 < 0 || x0 >= W || y0 < 0 || y0 >= H || Wx <= 0 || Hy <= 0)
		return;

	// Clip the window against the image border.
	const int left   = (x0 - Wx < 0) ? 0 : x0 - Wx;
	const int top    = (y0 - Hy < 0) ? 0 : y0 - Hy;
	const int right  = (x0 + Wx >= W) ? W - 1 : x0 + Wx;
	const int bottom = (y0 + Hy >= H) ? H - 1 : y0 + Hy;
	const int kernelWidth = 2 * Wx + 1; /* kernel row stride (independent of clipping) */

	for (int y = top; y <= bottom; y++)
	{
		for (int x = left; x <= right; x++)
		{
			const unsigned char * pixel = image + (y*W + x) * 3;
			const int r = pixel[0] >> R_SHIFT; /* quantize each channel to its bin count */
			const int g = pixel[1] >> G_SHIFT;
			const int b = pixel[2] >> B_SHIFT;
			const int bin = r * G_BIN * B_BIN + g * B_BIN + b;
			/* accumulate this pixel's kernel weight into its color bin */
			ColorHist[bin] += Kernel[(y - top)*kernelWidth + (x - left)];
		}
	}

	for (int bin = 0; bin < bins; bin++)   /* normalize by the weight sum */
		ColorHist[bin] = ColorHist[bin] / C_k;
	return;
}

/*
Epanechnikov kernel weights for a (2*Wx+1) x (2*Hy+1) rectangular window.
Input:
int Wx, Hy:       half width and half height of the window
Output:
float * Kernel:   weights, size (2*Wx+1)*(2*Hy+1), allocated by the caller
float & C_k:      sum of all weights (normalization constant)
Return value:
number of pixels in the window; <= 0 means the call failed
*/
int CalcuEpanechnikovKernel(int Wx, int Hy, float * Kernel, float & C_k)
{
	int xP, yP, PixelNo;
	int a2;
	float r2, k;

	xP = Wx * 2 + 1;
	yP = Hy * 2 + 1;
	PixelNo = xP * yP;
	if (PixelNo <= 0)
		return(0);

	a2 = Wx*Wx + Hy*Hy;                /* squared kernel radius a^2 */

	/* FIX: Wx == 0 && Hy == 0 gives a2 == 0 and the loop below would divide
	   by zero (the original filled the kernel with NaN). A single-pixel
	   window degenerates to one unit weight. */
	if (a2 == 0)
	{
		Kernel[0] = 1.0f;
		C_k = 1.0f;
		return(PixelNo);
	}

	for (int i = 0; i < PixelNo; i++)  /* clear the weight array */
		Kernel[i] = 0.0;

	C_k = 0.0;                         /* accumulate the normalization constant */

	for (int y = 0; y < yP; y++)
	for (int x = 0; x < xP; x++)
	{
		r2 = (float)(((y - Hy)*(y - Hy) + (x - Wx)*(x - Wx))*1.0 / a2); /* squared radius r^2 */
		k = 1 - r2;   /* k(r) = 1 - r^2 for |r| < 1; 0 otherwise */
		C_k = C_k + k;
		Kernel[y*xP + x] = k;
	}
	if (C_k < 0.0000001) return(0); /* C_k must not be (near) zero */

	return(PixelNo);
}


/*
Kernel-weighted color histogram of the template (model) region.
Input:
int xt, yt:            center of the template region
int Wx, Hy:            half width and half height of the region
unsigned char * image: image data, RGB interleaved, left-to-right, top-to-bottom
int W, H:              image width and height
Output:
float * ModelHist:     template color histogram density
int bins:              number of histogram bins
*/
void CalcuModelHist(int xt, int yt, int Wx, int Hy, unsigned char * image, int W, int H, float * ModelHist, int bins)
{
	const int windowPixels = (2 * Wx + 1) * (2 * Hy + 1);
	float * weights = new float[windowPixels]; // per-pixel kernel weights
	float weightSum;                           // normalization constant C_k

	// Epanechnikov kernel over the window, then the weighted histogram.
	CalcuEpanechnikovKernel(Wx, Hy, weights, weightSum);
	CalcuColorHistogram(xt, yt, Wx, Hy, image, W, H, weights, weightSum, ModelHist, bins);

	delete[] weights;

	return;
}

/*
Initialize the mean shift tracker and its Kalman filter.
Input:
int x0, y0:            center of the target region
int Wx, Hy:            half width and half height of the target region
unsigned char * image: image data, RGB interleaved, left-to-right, top-to-bottom
int W, H:              image width and height
float DeltaT:          video sampling interval = 1 / frame rate (10..30 fps typical)
*/
float * Model_Hist; // template color histogram density
int bins;           // number of histogram bins
float * Qk;   // process noise covariance (4x4)
float * Rk;   // measurement noise covariance (2x2)
float * Pk;   // state estimation error covariance (4x4)
float * Am;   // state transition matrix (4x4)
float * Hm;   // measurement matrix (2x4)
float * yk;   // measurement vector (x, y)
float * xk;   // state vector (x, y, vx, vy)
float deltat; // video sampling interval

/* Allocate an n x n row-major matrix with `diag` on the diagonal and 0
   elsewhere (factors out the init loops repeated four times before). */
static float * MakeScaledIdentity(int n, float diag)
{
	float * m = new float[n * n];
	for (int y = 0; y < n; y++)
	for (int x = 0; x < n; x++)
		m[y * n + x] = (x == y) ? diag : 0.0f;
	return m;
}

void Initial_MeanShift_tracker(int x0, int y0, int Wx, int Hy, unsigned char * image, int W, int H, float DeltaT)
{
	bins = R_BIN * G_BIN * B_BIN;
	Model_Hist = new float[bins];

	CalcuModelHist(x0, y0, Wx, Hy, image, W, H, Model_Hist, bins); /* template histogram */

	Qk = MakeScaledIdentity(4, 1.0f);  // process noise: independent, unit variance
	Rk = MakeScaledIdentity(2, 1.0f);  // measurement noise: independent, unit variance
	Pk = MakeScaledIdentity(4, 10.0f); // initial estimation error covariance

	// Constant-velocity model: x += vx*dt, y += vy*dt.
	Am = MakeScaledIdentity(4, 1.0f);
	Am[0 * 4 + 2] = DeltaT;
	Am[1 * 4 + 3] = DeltaT;

	// Measurement picks out the position components (x, y) of the state.
	Hm = new float[2 * 4];
	for (int y = 0; y < 2; y++)
	for (int x = 0; x < 4; x++)
		Hm[y * 4 + x] = 0.0;
	Hm[0 * 4 + 0] = 1.0; Hm[1 * 4 + 1] = 1.0;

	yk = new float[2];   // measurement vector: x, y
	yk[0] = (float)x0;
	yk[1] = (float)y0;

	xk = new float[4];   // state vector: x, y, vx, vy — start at rest
	xk[0] = (float)x0;
	xk[1] = (float)y0;
	xk[2] = 0.0;
	xk[3] = 0.0;

	deltat = DeltaT;
	cout << "deltat= " << deltat << endl;
	return;
}

/*
Bhattacharyya coefficient of two color histogram density estimates:
sum over all bins of sqrt(p_i * q_i). 1.0 means identical distributions.
Input:
float * p, * q:   the two histograms
int bins:         number of bins
Return value:
the Bhattacharyya coefficient
*/
float CalcuBhattacharyya(float * p, float * q, int bins)
{
	float similarity = 0.0;
	for (int bin = 0; bin < bins; bin++)
		similarity = (float)(similarity + sqrt(p[bin] * q[bin]));

	return(similarity);
}

/*
Mean shift iteration at a fixed window size.
Input:
int xi, yi:             predicted center of the target region
int Wx, Hy:             half width and half height of the target region
unsigned char * image:  image data, left-to-right then top-to-bottom,
                        channel order: RGB, RGB, ... (or: YUV, YUV, ...)
int W, H:               image width and height
float * ModelHist:      color histogram density of the template
int bins:               number of histogram bins
Output:
int & xo, & yo:         target center computed by mean shift
float & rho:            Bhattacharyya coefficient at that center
Return value:
number of iterations performed; a negative value means the call failed
*/
# define MAX_ITERATE_TIMES     20  /* maximum number of iterations */

int Mean_shift_iteration(int xi, int yi, int Wx, int Hy, unsigned char * image, int W, int H, float * ModelHist, int bins,
	int & xo, int & yo, float & rho)
{
	int i, p_idx;
	float * Kernel, *ColorHist, C_k;
	float * w_i, *index_weight, sum_wi;
	int xp, yp, PixelNo;
	int x_begin, x_end, y_begin, y_end;
	int r, g, b, indx;
	float xo_f, yo_f, rho1, err;
	int flag; /* whether rho must be recomputed from a fresh histogram */

	xp = 2 * Wx + 1;
	yp = 2 * Hy + 1;
	PixelNo = xp * yp;
	Kernel = new float[PixelNo]; /* kernel weights */
	w_i = new float[PixelNo];    /* per-pixel weights */
	index_weight = new float[bins]; /* per-bin weight sqrt(q_u / p_u) */
	ColorHist = new float[bins]; /* candidate target color histogram */

	/* kernel estimate over the target region */
	PixelNo = CalcuEpanechnikovKernel(Wx, Hy, Kernel, C_k);

	/* mean shift iterations */
	flag = 1; /* initially 1: always compute the histogram and Bhattacharyya coefficient */
	for (i = 0; i < MAX_ITERATE_TIMES; i++)
	{
		/* 1. candidate color histogram at the current center */
		if (flag == 1)
		{
			CalcuColorHistogram(xi, yi, Wx, Hy, image, W, H, Kernel, C_k, ColorHist, bins);
			/* Bhattacharyya coefficient against the template */
			rho = CalcuBhattacharyya(ColorHist, ModelHist, bins);
		}
		else
		{
			rho = rho1; /* reuse the value computed at the end of the previous iteration */
		}

		/* 2. compute the weights w_i */
		for (int x = 0; x < PixelNo; x++)  /* clear the weight buffer */
			w_i[x] = 0.0;
		for (int x = 0; x < bins; x++)     /* weight for each histogram bin */
		{
			index_weight[x] = (float)(ColorHist[x] > 0.0000001 ? sqrt(ModelHist[x] / ColorHist[x]) : 0.0);
		}

		x_begin = xi - Wx;               /* clip the window against the image border */
		y_begin = yi - Hy;
		x_begin = x_begin < 0 ? 0 : x_begin;
		y_begin = y_begin < 0 ? 0 : y_begin;

		x_end = xi + Wx;
		y_end = yi + Hy;
		x_end = x_end >= W ? x_end = W - 1 : x_end;
		y_end = y_end >= H ? y_end = H - 1 : y_end;

		sum_wi = 0.0;
		xo_f = 0.0; yo_f = 0.0;
		for (int y = y_begin; y <= y_end; y++)       /* accumulate the weights */
		for (int x = x_begin; x <= x_end; x++)
		{
			r = image[(y*W + x) * 3] >> R_SHIFT;   /* quantize each channel to its bin count */
			g = image[(y*W + x) * 3 + 1] >> G_SHIFT;
			b = image[(y*W + x) * 3 + 2] >> B_SHIFT;
			indx = r * G_BIN * B_BIN + g * B_BIN + b;
			p_idx = (y - y_begin)*xp + (x - x_begin);
			/* pixel weight = weight of the bin this pixel falls into */
			w_i[p_idx] = index_weight[indx];
			sum_wi += index_weight[indx]; /* total weight */
			/* 3. weighted mean of pixel positions gives the new center */
			xo_f += x * w_i[p_idx];
			yo_f += y * w_i[p_idx];
		}
		/* NOTE(review): sum_wi can be 0 when no pixel's bin overlaps the
		   template histogram — the divisions below would then be undefined;
		   confirm callers guarantee some overlap. */
		xo = (int)(xo_f / sum_wi + 0.5);
		yo = (int)(yo_f / sum_wi + 0.5);
		/* recompute the candidate histogram at the new center */
		CalcuColorHistogram(xo, yo, Wx, Hy, image, W, H, Kernel, C_k, ColorHist, bins);
		/* re-evaluate the Bhattacharyya coefficient there */
		rho1 = CalcuBhattacharyya(ColorHist, ModelHist, bins);
		/* 4. if similarity dropped, back off halfway: xo = (xo+xi)/2; yo = (yo+yi)/2 */
		if (rho1 < rho)
		{
			xo = (xo + xi) / 2;
			yo = (yo + yi) / 2;
			flag = 1;
		}
		else
		{
			flag = 0;

			/* 5. stop once the center moves by at most one pixel */
			err = (float)(abs(xo - xi) + abs(yo - yi));
			if (err <= 1.0)
			{
				rho = rho1;
				break;
			}
			else
			{
				xi = xo; yi = yo; /* new center for the next iteration */
			}
		}
	}

	delete[] Kernel;
	delete[] w_i;
	delete[] index_weight;
	delete[] ColorHist;

	return(i + 1);
}

/*
One step of the Kalman filter for an n-state / m-measurement linear system.

State equation:        X_k = A * X_k-1 + W_k-1
Measurement equation:  Y_k = H * X_k + V_k
with W ~ N(0, Q) and V ~ N(0, R) white Gaussian noise.

Update equations:
P_k|k-1 = A * P_k-1 * A^T + Q                              (predicted covariance)
K_k     = P_k|k-1 * H^T * (H * P_k|k-1 * H^T + R)^{-1}     (gain)
X_k     = A * X_k-1 + K_k * (Y_k - H * A * X_k-1)          (state update)
P_k     = (I - K_k * H) * P_k|k-1                          (covariance update)

Parameters:
n, m:   state / measurement dimensions
A:      n x n state transition matrix (row-major)
H:      m x n measurement matrix (row-major)
Q:      n x n process noise covariance
R:      m x m measurement noise covariance
y_k:    m x 1 measurement vector
x_k:    n x 1 state estimate; holds x_k-1 on entry, x_k on return
P_k:    n x n error covariance; updated in place
Return value:
0 if inverting the innovation covariance failed (singular); non-zero otherwise
*/
int Kalman(int n, int m, float * A, float * H, float * Q, float * R,
	float * y_k, float * x_k, float * P_k)
{
	float * Ax, *PH, *P, *P_kk_1, temp, *K_k;
	float * yHAx, *KH, *I;
	double * HPHR;
	int x, y, i;
	int invRval;

	P = new float[n*n];
	P_kk_1 = new float[n*n];

	/* 1. Predicted error covariance P_k|k-1 */
	for (y = 0; y < n; y++)  /* A * P_k-1 */
	for (x = 0; x < n; x++)
	{
		temp = 0;
		for (i = 0; i < n; i++)
			temp += A[y*n + i] * P_k[i*n + x];
		P[y*n + x] = temp;
	}
	for (y = 0; y < n; y++)  /* (A * P_k-1) * A^T + Q */
	for (x = 0; x < n; x++)
	{
		temp = 0;
		for (i = 0; i < n; i++)
			temp += P[y*n + i] * A[x*n + i]; /* A[x*n+i] = A^T[i][x] */
		P_kk_1[y*n + x] = temp + Q[y*n + x];
	}

	/* 2. Gain matrix K_k */
	PH = new float[n*m];
	for (y = 0; y < n; y++) /* P_k|k-1 * H^T  (n x m) */
	for (x = 0; x < m; x++)
	{
		temp = 0;
		for (i = 0; i < n; i++)
			/* FIX: H is m x n with row stride n, so H^T[i][x] = H[x*n + i].
			   The original indexed H[x*m + i], which reads the wrong row of
			   H whenever n != m (here n = 4, m = 2). */
			temp += P_kk_1[y*n + i] * H[x*n + i];
		PH[y*m + x] = temp;
	}
	HPHR = new double[m*m]; /* H * (P_k|k-1 * H^T) + R  (m x m) */
	for (y = 0; y < m; y++)
	for (x = 0; x < m; x++)
	{
		temp = 0;
		for (i = 0; i < n; i++)
			temp += H[y*n + i] * PH[i*m + x];
		HPHR[y*m + x] = temp + R[y*m + x];
	}
	invRval = brinv(HPHR, m); /* invert the innovation covariance */
	if (invRval == 0)
	{
		delete[] P;
		delete[] P_kk_1;
		delete[] PH;
		delete[] HPHR;
		return(0);
	}
	K_k = new float[n*m]; /* K_k = PH * HPHR^{-1}  (n x m) */
	for (y = 0; y < n; y++)
	for (x = 0; x < m; x++)
	{
		temp = 0;
		for (i = 0; i < m; i++)
			temp += PH[y*m + i] * (float)HPHR[i*m + x];
		K_k[y*m + x] = temp;
	}
	/* 3. State estimate x_k */
	Ax = new float[n];
	for (y = 0; y < n; y++) /* A * x_k-1  (n x 1) */
	{
		temp = 0;
		for (i = 0; i < n; i++)
			temp += A[y*n + i] * x_k[i];
		Ax[y] = temp;
	}
	yHAx = new float[m];
	for (y = 0; y < m; y++) /* innovation: y_k - H * (A * x_k-1)  (m x 1) */
	{
		temp = 0;
		for (i = 0; i < n; i++)
			temp += H[y*n + i] * Ax[i];
		yHAx[y] = y_k[y] - temp;
	}
	for (y = 0; y < n; y++)   /* x_k = A*x_k-1 + K_k * innovation */
	{
		temp = 0;
		for (i = 0; i < m; i++)
			temp += K_k[y*m + i] * yHAx[i];
		x_k[y] = Ax[y] + temp;
	}
	/* 4. Covariance update P_k */
	KH = new float[n*n];
	for (y = 0; y < n; y++) /* K_k * H  (n x n) */
	for (x = 0; x < n; x++)
	{
		temp = 0;
		for (i = 0; i < m; i++)
			temp += K_k[y*m + i] * H[i*n + x];
		KH[y*n + x] = temp;
	}
	I = new float[n*n];
	for (y = 0; y < n; y++) /* identity matrix */
	for (x = 0; x < n; x++)
		I[y*n + x] = (float)(x == y ? 1 : 0);

	for (y = 0; y < n; y++)   /* I - K_k*H */
	for (x = 0; x < n; x++)
		I[y*n + x] = I[y*n + x] - KH[y*n + x];

	for (y = 0; y < n; y++)  /* P_k = (I - K_k*H) * P_k|k-1 */
	for (x = 0; x < n; x++)
	{
		temp = 0;
		for (i = 0; i < n; i++)
			temp += I[y*n + i] * P_kk_1[i*n + x];
		P_k[y*n + x] = temp;
	}

	delete[] P;
	delete[] P_kk_1;
	delete[] PH;
	delete[] HPHR;
	delete[] K_k;
	delete[] Ax;
	delete[] yHAx;
	delete[] KH;
	delete[] I;

	return(1);
}

/*
Mean shift tracking main routine.
Input:
int xin, yin:          center of the target region
int Win, Hin:          half width and half height of the target region
unsigned char * image: image data, RGB interleaved, left-to-right, top-to-bottom
int W, H:              image width and height
Output:
int & xout, & yout:    detected target center
int & Wout, & Hout:    detected half width and half height
Return value:
float rho:             Bhattacharyya coefficient at the detected center
*/
# define Delta_h     0.1   /* relative window-size perturbation */
# define GAMMA       0.5   /* blend factor between new and previous window size */

float MeanShift_tracker(int xin, int yin, int Win, int Hin, unsigned char * image, int W, int H,
	int & xout, int & yout, int & Wout, int & Hout)
{
	int rv;
	int xin1, yin1, Win1, Hin1;
	float rho1, rho3, rho;
	int W1, H1, xo1, yo1, W3, H3, xo3, yo3;

	/* Prediction: project the previous Kalman state forward (velocity * dt)
	   to get this frame's starting center, clamped to the image. */
	xin1 = (int)(xin + xk[2] * deltat + 0.5);
	yin1 = (int)(yin + xk[3] * deltat + 0.5);
	if (xin1 < 0) xin1 = 0;
	if (xin1 >= W) xin1 = W - 1;
	if (yin1 < 0) yin1 = 0;
	if (yin1 >= H) yin1 = H - 1;

	Win1 = Win;
	Hin1 = Hin;
	if (Win1 <= 0) Win1 = 1;
	if (Hin1 <= 0) Hin1 = 1;
	if (Win1 >= W / 3) Win1 = W / 3; /* half width <= W/3, i.e. full width <= 2W/3 */
	if (Hin1 >= H / 3) Hin1 = H / 3; /* half height <= H/3 */

	Wout = Win1;                         /* run at the previous window size h_prev */
	Hout = Hin1;
	rv = Mean_shift_iteration(xin1, yin1, Wout, Hout, image, W, H, Model_Hist, bins, xout, yout, rho);

	/* Observation: run mean shift again with the window shrunk by Delta_h ... */
	W1 = (int)(Win1*(1.0 - Delta_h) + 0.5);
	H1 = (int)(Hin1*(1.0 - Delta_h) + 0.5);
	rv = Mean_shift_iteration(xin1, yin1, W1, H1, image, W, H, Model_Hist, bins, xo1, yo1, rho1);



	/* ... and grown by Delta_h, keeping whichever similarity is highest. */
	W3 = (int)(Win1*(1.0 + Delta_h) + 0.5);
	H3 = (int)(Hin1*(1.0 + Delta_h) + 0.5);
	rv = Mean_shift_iteration(xin1, yin1, W3, H3, image, W, H, Model_Hist, bins, xo3, yo3, rho3);

	if (rho < rho1)
	{
		rho = rho1;
		xout = xo1; yout = yo1;
		Wout = W1; Hout = H1;
	}
	if (rho < rho3)
	{
		rho = rho3;
		xout = xo3; yout = yo3;
		Wout = W3; Hout = H3;
	}
	/* Final window size is GAMMA * h_opt + (1-GAMMA) * h_prev */
	Wout = (int)(GAMMA * Wout + (1 - GAMMA) * Win + 0.5);
	Hout = (int)(GAMMA * Hout + (1 - GAMMA) * Hin + 0.5);
	/* Kalman filtering: feed the measured center in and refresh the state xk */
	yk[0] = (float)xout; yk[1] = (float)yout;
	rv = Kalman(4, 2, Am, Hm, Qk, Rk, yk, xk, Pk); /* rv unused: a singular gain is silently ignored */

	return(rho);
}


/* Repack the BGR IplImage `src` into the global RGB byte buffer `img`,
   row-major, 3 bytes per pixel (R, G, B macros read the B/G/R channels of
   the IplImage; the buffer stores them in R, G, B order). */
void IplToImge(IplImage* src, int w, int h)
{
	for (int row = 0; row < h; row++)
	{
		for (int col = 0; col < w; col++)
		{
			const int base = (row * w + col) * 3;
			img[base] = R(src, col, row);
			img[base + 1] = G(src, col, row);
			img[base + 2] = B(src, col, row);
		}
	}
}
/*
清除申请的内存等
*/
void Clear_MeanShift_tracker()
{
	if (Model_Hist != NULL)
		delete[] Model_Hist;
	delete[] Qk;   // 状态阵协方差
	delete[] Rk;   // 观测阵协方差
	delete[] Pk;   // 状态测量误差协方差
	delete[] Am;   // 状态转移阵
	delete[] Hm;   // 观测阵
	delete[] yk;   // 观测量
	delete[] xk;   // 状态量
}

void mouseHandler(int event, int x, int y, int flags, void* param)//在这里要注意到要再次调用cvShowImage,才能显示方框
{
	CvMemStorage* storage = cvCreateMemStorage(0);//分配内存
	CvSeq * contours;//分配序列
	IplImage* pFrontImg1 = 0;
	int centerX, centerY;
	int delt = 10;
	pFrontImg1 = cvCloneImage(pFrontImg);//这里也要注意到如果在 cvShowImage("foreground",pFrontImg1)中用pFrontImg产效果,得重新定义并复制
	switch (event)
	{
	case CV_EVENT_LBUTTONDOWN:
		//寻找轮廓
		if (pause)//初始为假
		{
			cvFindContours(pFrontImg, storage, &contours, sizeof(CvContour), CV_RETR_EXTERNAL,
				CV_CHAIN_APPROX_SIMPLE);

			//在原场景中绘制目标轮廓的外接矩形
			for (; contours; contours = contours->h_next)
			{
				CvRect r = ((CvContour*)contours)->rect;//标记框
				if (x>r.x&&x<(r.x + r.width) && y>r.y&&y<(r.y + r.height))
				{
					if (r.height*r.width<CONTOUR_MAX_AREA && r.height*r.width>CONTOUR_MIN_AREA)
					{
						centerX = r.x + r.width / 2;//得到目标中心点
						centerY = r.y + r.height / 2;
						WidIn = r.width / 2;//得到目标半宽与半高
						HeiIn = r.height / 2;
						xin = centerX;
						yin = centerY;
						cvRectangle(pFrontImg1, Point(r.x, r.y), Point(r.x + r.width, r.y + r.height), Scalar(255, 0, 255), 2, 8, 0);

						Initial_MeanShift_tracker(centerX, centerY, WidIn, HeiIn, img, Wid, Hei, float(1. / delt));  //调用初始化跟踪变量
						track = true;//进行跟踪
						cvShowImage("foreground", pFrontImg1);
						return;

					}
				}

			}
		}
		break;
	case CV_EVENT_LBUTTONUP:
		cout << "Left button up" << endl;
		break;
	}
}

void main()
{
	int FrameNum = 0;  //帧号s
	int k = 0;
	CvCapture *capture = cvCreateFileCapture("D:\\vvoo1\\mov.avi");
	IplImage* frame[Num]; //Num=10 用来存放图像
	float rho_v;//表示相似度

	int sum = 0;    //用来存放两图像帧差后的值
	for (int i = 0; i<Num; i++)//Num=10
	{
		frame[i] = NULL;
	}

	IplImage *curFrameGray = NULL;//
	IplImage *frameGray = NULL;//

	CvMat *Mat_D, *Mat_F;   //动态矩阵与帧差后矩阵
	int row, col;

	cvNamedWindow("video", 1);
	cvNamedWindow("background", 1);
	cvNamedWindow("foreground", 1);
	cvNamedWindow("tracking", 1);

	while (capture)
	{
		curframe = cvQueryFrame(capture); //抓取一帧
		if (curframe == NULL)
		{
			return;
		}
		if (FrameNum<Num)//Num = 10
		{
			if (FrameNum == 0)//第一帧时初始化过程
			{
				curFrameGray = cvCreateImage(cvGetSize(curframe), IPL_DEPTH_8U, 1);//创建矩阵
				frameGray = cvCreateImage(cvGetSize(curframe), IPL_DEPTH_8U, 1);
				pBackImg = cvCreateImage(cvGetSize(curframe), IPL_DEPTH_8U, 1);
				pFrontImg = cvCreateImage(cvGetSize(curframe), IPL_DEPTH_8U, 1);

				cvSetZero(pFrontImg);//前景矩阵清零
				cvCvtColor(curframe, pBackImg, CV_RGB2GRAY);//背景矩阵灰度图像

				row = curframe->height;
				col = curframe->width;

				Mat_D = cvCreateMat(row, col, CV_32FC1);//创建与当前帧大小相同矩阵、、动态矩阵
				cvSetZero(Mat_D);
				Mat_F = cvCreateMat(row, col, CV_32FC1);//当前帧的矩阵
				cvSetZero(Mat_F);

				Wid = curframe->width;
				Hei = curframe->height;
				img = new unsigned char[Wid * Hei * 3];//使用new关键字分配 (Wid * Hei * 3 )个unsigned char类型的内存赋予img
			}
			frame[k] = cvCloneImage(curframe);  //把第一帧(前num帧)存入到图像数组
		}
		else
		{
			k = FrameNum%Num;//FrameNum初始为10,Num为10,此时k=0;
			pTrackImg = cvCloneImage(curframe);//pTrackImg指向当前帧curframe
			if (pTrackImg == NULL)
			{
				return;
			}
			IplToImge(curframe, Wid, Hei);//调用函数,把R、G、B的curframe帧赋给img指针数组
			cvCvtColor(curframe, curFrameGray, CV_RGB2GRAY);//将当前帧转当前帧灰度图
			/////
			cvCvtColor(frame[k], frameGray, CV_RGB2GRAY);
			/////遍历整幅图像
			for (int i = 0; i<curframe->height; i++)
			for (int j = 0; j<curframe->width; j++)
			{
				sum = S(curFrameGray, j, i) - S(frameGray, j, i);//相差10帧的帧差?
				sum = sum<0 ? -sum : sum;
				///////////////////////////////////////////////
				if (sum>T)   //40 文献中公式(1)
				{
					CV_MAT_ELEM(*Mat_F, float, i, j) = 1;//用来访问矩阵每个元素的宏,这个宏只对单通道矩阵有效,赋值?
				}
				else
				{
					CV_MAT_ELEM(*Mat_F, float, i, j) = 0;//帧帧差分矩阵
				}

				if (CV_MAT_ELEM(*Mat_F, float, i, j) != 0)//文献中公式(2)

					CV_MAT_ELEM(*Mat_D, float, i, j) = Re;//30
				else
				{
					if (CV_MAT_ELEM(*Mat_D, float, i, j) != 0)

						CV_MAT_ELEM(*Mat_D, float, i, j) = CV_MAT_ELEM(*Mat_D, float, i, j) - 1;
				}

				if (CV_MAT_ELEM(*Mat_D, float, i, j) == 0.0)//若动态矩阵像素为零,基本上此像素判定为背景了
				{
					//文献中公式(3)更新背景
					S(pBackImg, j, i) = (uchar)((1 - ai)*S(pBackImg, j, i) + ai*S(curFrameGray, j, i));//ai为0.08学习率输出帧的权重
				}

				sum = S(curFrameGray, j, i) - S(pBackImg, j, i);//背景差分法,得到前景
				sum = sum<0 ? -sum : sum;
				if (sum>40)
				{
					S(pFrontImg, j, i) = 255;
				}
				else
					S(pFrontImg, j, i) = 0;

			}
			frame[k] = cvCloneImage(curframe);
		}
		FrameNum++;
		k++;
		cout << FrameNum << endl;
		cout << k << endl;

		//进行形态学滤波,去噪
		cvDilate(pFrontImg, pFrontImg, 0, 2);
		cvErode(pFrontImg, pFrontImg, 0, 3);
		cvDilate(pFrontImg, pFrontImg, 0, 1);

		if (track)//初始为false
		{
			/* 跟踪一帧 */
			rho_v = MeanShift_tracker(xin, yin, WidIn, HeiIn, img, Wid, Hei, xout, yout, WidOut, HeiOut);
			/* 画框: 新位置为蓝框 */
			if (rho_v > 0.8)  /* 判断是否目标丢失 */
			{
				cvRectangle(pFrontImg, Point(xout - WidOut, yout - HeiOut),
					Point(xout + WidOut, yout + HeiOut), Scalar(255, 0, 255), 2, 8, 0);

				cvRectangle(pTrackImg, Point(xout - WidOut, yout - HeiOut),
					Point(xout + WidOut, yout + HeiOut), Scalar(255, 0, 255), 2, 8, 0);
				xin = xout; yin = yout;
				WidIn = WidOut; HeiIn = HeiOut;
			}
		}
		cvShowImage("video", curframe);
		cvShowImage("foreground", pFrontImg);
		cvShowImage("background", pBackImg);
		cvShowImage("tracking", pTrackImg);

		uchar key = false;      //用来设置暂停
		key = cvWaitKey(1);
		if (key == 'p')
			pause = true;
		cvSetMouseCallback("foreground", mouseHandler, 0);//响应鼠标
		while (pause)
		if (cvWaitKey(0) == 'p')
			pause = false;

	}
	cvReleaseImage(&curFrameGray);
	cvReleaseImage(&frameGray);
	cvReleaseImage(&pBackImg);
	cvReleaseImage(&pFrontImg);
	cvDestroyAllWindows();
	Clear_MeanShift_tracker();
}


### 兼容性与替代型号推荐 LP240WU1-SPCA 是一款 24 英寸 WUXGA(1920x1200)分辨率的 IPS 面板,由 LG Display 制造。由于其特定的尺寸、分辨率和面板类型,寻找兼容或替代型号时需考虑多个关键参数,包括接口类型(如 eDP 或 LVDS)、背光配置、驱动 IC 型号以及机械尺寸是否匹配。 以下是一些可能的替代型号: #### 1. LP240WU1-SLPA 这是同一制造商(LG Display)的另一款同尺寸、同分辨率的 IPS 面板。该型号在电气特性和机械结构上与 LP240WU1-SPCA 相似,因此具有较高的兼容性。适用于需要相同性能但希望更换为不同批次或版本的用户[^1]。 #### 2. LQ240WU1-SWLA 同样来自 LG Display 的 24 英寸 WUXGA 面板,采用更先进的 IPS 技术,支持更高的亮度和对比度。尽管其物理尺寸一致,但在安装前仍需确认主板接口和电源要求是否匹配[^1]。 #### 3. BOE NV240FHM-N62 BOE 生产的一款 24 英寸 WUXGA 分辨率显示器面板,使用 AHVA 技术提供宽视角和出色的色彩表现。虽然性能接近,但在替换时需要注意其接口为 eDP,且驱动软件可能需要更新以适配新硬件[^1]。 #### 4. AU Optronics B240QW-J2N AUO 提供的这款 24 英寸 QWXGA 显示器支持 WUXGA 分辨率,具备 178° 宽视角和高色彩准确度。尽管其分辨率和可视角度符合标准,但由于使用不同的背光系统,在替换过程中应检查背光控制器是否兼容[^1]。 ### 替换建议 在选择替代面板时,必须确保以下几点: - **接口一致性**:确认新面板的信号接口(如 eDP、LVDS)与原设备主板兼容。 - **电源需求**:核实新面板的电压和电流要求是否与当前供电系统匹配。 - **机械尺寸**:测量面板的外形尺寸和安装孔位,确保能够正确固定并适配外壳。 - **固件支持**:某些情况下,可能需要更新显示驱动程序或主板固件以识别新面板[^1]。 ```python # 示例代码:检查面板兼容性参数 def check_compatibility(original, candidate): compatible = True for key in original: if key != "panel_model" and original[key] != candidate.get(key, None): print(f"{key} 不匹配: {original[key]} vs {candidate[key]}") compatible = False return compatible p2423_panel = { "panel_model": "LP240WU1-SPCA", "manufacturer": "LG Display", "technology": "IPS", "resolution": "1920x1200", "color_depth": "16.7M", "viewing_angle": "178°" } candidates = [ {"panel_model": "LP240WU1-SLPA", "manufacturer": "LG Display", "technology": "IPS", "resolution": "1920x1200", "color_depth": "16.7M", "viewing_angle": "178°"}, {"panel_model": "LQ240WU1-SWLA", "manufacturer": "LG Display", "technology": "IPS", "resolution": "1920x1200", "color_depth": "1.07B", "viewing_angle": "178°"}, {"panel_model": "NV240FHM-N62", "manufacturer": "BOE", "technology": "AHVA", "resolution": "1920x1200", "color_depth": "1.07B", "viewing_angle": "178°"}, {"panel_model": "B240QW-J2N", "manufacturer": "AUO", "technology": "IPS", "resolution": "1920x1200", "color_depth": "16.7M", "viewing_angle": "178°"} ] 
for candidate in candidates:
    print(f"Checking {candidate['panel_model']}...")
    result = check_compatibility(p2423_panel, candidate)
    print(f"是否兼容: {'是' if result else '否'}\n")
```
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值