Invert the Color of an Image with OpenCV

This article shows how to invert an image's colors with the OpenCV library: load the image, access its pixel data, invert that data, and display the result. The key steps covered are image loading, reading image properties, manipulating the raw data, and showing the processed image.
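For an 8-bit image, inversion simply replaces every channel value with its complement with respect to 255, i.e. for each pixel and channel:

    dst(x, y, c) = 255 - src(x, y, c)

The code below does exactly this by walking the raw IplImage buffer.
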
#include <opencv/cv.h>
#include <opencv/highgui.h>
#include <stdio.h>
#include <tchar.h>

int _tmain(int argc, _TCHAR* argv[]) {
	IplImage *img = NULL;
	int height, width, step, channels;
	uchar *data;

	//load an image
	img = cvLoadImage("Lena.jpg");
	if (!img) {
		fprintf(stderr, "Cannot open the file\n");
		return -1;
	}

	//get the image data
	height   = img->height;
	width    = img->width;
	step     = img->widthStep;   //bytes per image row (may include padding)
	channels = img->nChannels;
	data     = (uchar *)img->imageData;
	printf("Processing a %dx%d image with %d channels\n", height, width, channels);

	//create a window
	cvNamedWindow("test1", CV_WINDOW_AUTOSIZE);
	cvMoveWindow("test1", 100, 100);

	//invert the image in place: every channel value v becomes 255 - v
	//equivalent to the one-call C API version:
	/*IplImage *dst = cvCreateImage(cvGetSize(img), img->depth, channels);
	cvNot(img, dst);*/
	for (int i = 0; i < height; i++) {
		for (int j = 0; j < width; j++) {
			for (int k = 0; k < channels; k++) {
				data[i * step + j * channels + k] =
					255 - data[i * step + j * channels + k];
			}
		}
	}

	//show the inverted image
	cvShowImage("test1", img);

	cvWaitKey(0);

	cvReleaseImage(&img);
	//cvReleaseImage(&dst);
	cvDestroyAllWindows();

	return 0;
}
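
The listing above uses the legacy IplImage C API. For newer OpenCV versions, the same inversion can be written with the C++ API; the following is a minimal sketch assuming OpenCV 2.x or later, reusing the article's "Lena.jpg" file and "test1" window name.

#include <opencv2/opencv.hpp>
#include <cstdio>

int main() {
	// Load the same image used above.
	cv::Mat img = cv::imread("Lena.jpg");
	if (img.empty()) {
		std::fprintf(stderr, "Cannot open the file\n");
		return -1;
	}

	// bitwise_not computes 255 - v for every 8-bit channel,
	// the same operation as the manual loop (and as cvNot in the C API).
	cv::Mat inverted;
	cv::bitwise_not(img, inverted);

	cv::namedWindow("test1", cv::WINDOW_AUTOSIZE);
	cv::imshow("test1", inverted);
	cv::waitKey(0);
	return 0;
}

Memory management is automatic here: cv::Mat releases its buffer when it goes out of scope, so there is no counterpart to cvReleaseImage.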
