旋转、移动与 frame 的混用

本文介绍如何使用UIRotationGestureRecognizer和UIPanGestureRecognizer实现iOS应用中图像视图的旋转和平移功能。通过设置手势识别器的目标和动作,可以响应用户的触摸操作,并更新图像视图的显示状态。

摘要生成于 C知道 ,由 DeepSeek-R1 满血版支持, 前往体验 >

 UIRotationGestureRecognizer *rotationRecognizer = [[UIRotationGestureRecognizer alloc] initWithTarget:self action:@selector(rotate:)];

    [rotationRecognizer setDelegate:self];

    [self.view addGestureRecognizer:rotationRecognizer];

    

    UIPanGestureRecognizer *panRecognizer = [[UIPanGestureRecognizer alloc] initWithTarget:self action:@selector(move:)];

    [panRecognizer setMinimumNumberOfTouches:1];

    [panRecognizer setMaximumNumberOfTouches:1];

    [panRecognizer setDelegate:self];

    [self.view addGestureRecognizer:panRecognizer];

// 旋转

// Rotation handler for the UIRotationGestureRecognizer.
// Applies the incremental rotation delta reported since the previous callback
// to the image view's existing transform, so rotation composes with any
// translation/rotation already applied.
// _lastRotation holds the gesture's cumulative rotation at the last callback.
-(void)rotate:(id)sender {

    UIRotationGestureRecognizer *gesture = (UIRotationGestureRecognizer *)sender;

    // Reset the accumulator whenever the gesture finishes for ANY reason.
    // BUG FIX: the original only handled StateEnded, so a cancelled or failed
    // gesture left a stale _lastRotation and made the next rotation jump.
    if (gesture.state == UIGestureRecognizerStateEnded ||
        gesture.state == UIGestureRecognizerStateCancelled ||
        gesture.state == UIGestureRecognizerStateFailed) {

        _lastRotation = 0.0;
        return;
    }

    // gesture.rotation is cumulative since the gesture began; take the delta
    // since the previous callback. (Equivalent to the original's
    // 0.0 - (_lastRotation - rotation), written directly.)
    CGFloat delta = gesture.rotation - _lastRotation;

    // Concatenate the delta onto the current transform rather than replacing it.
    _imageView.transform = CGAffineTransformRotate(_imageView.transform, delta);

    _lastRotation = gesture.rotation;
}


// 移动

// Pan handler for the UIPanGestureRecognizer.
// Drags the image view by moving its center in the superview's coordinate
// space, so the view follows the finger even when it has been rotated.
-(void)move:(id)sender {

    UIPanGestureRecognizer *gesture = (UIPanGestureRecognizer *)sender;

    // Cumulative translation since the gesture began, in self.view's space.
    CGPoint translation = [gesture translationInView:self.view];

    if (gesture.state == UIGestureRecognizerStateBegan) {

        // Remember the reference translation and the view's starting center;
        // all subsequent movement is measured from these.
        _startPoint = translation;
        _firstX = _imageView.center.x;
        _firstY = _imageView.center.y;
    }
    else if (gesture.state == UIGestureRecognizerStateChanged)
    {
        _changePoint = translation;

        // BUG FIX: the original used CGAffineTransformTranslate, which applies
        // the offset in the view's *transformed* (possibly rotated) coordinate
        // space — after a rotation the view drifted away from the finger.
        // Setting the center works in the superview's space and is
        // rotation-independent; it also finally uses the stored _firstX/_firstY
        // that the original recorded but never read.
        _imageView.center = CGPointMake(_firstX + (_changePoint.x - _startPoint.x),
                                        _firstY + (_changePoint.y - _startPoint.y));
    }

}


// 改变 frame 的 height

// Shrinks the image view's height by 2 pt.
// A view's frame is undefined while a non-identity transform is set, so the
// safe pattern is: save transform -> reset to identity -> adjust frame ->
// restore the saved transform.
- (IBAction)declineHH:(id)sender

{

    CGAffineTransform savedTransform = _imageView.transform;

    _imageView.transform = CGAffineTransformIdentity;

    CGRect frame = _imageView.frame;
    frame.size.height -= 2;
    _imageView.frame = frame;

    _imageView.transform = savedTransform;

}

// 改变frame width

// Shrinks the image view's width by 2 pt.
// Same save/reset/resize/restore dance as declineHH: — the frame property is
// only meaningful under the identity transform.
- (IBAction)declineWW:(id)sender

{

    CGAffineTransform savedTransform = _imageView.transform;

    _imageView.transform = CGAffineTransformIdentity;

    CGRect frame = _imageView.frame;
    frame.size.width -= 2;
    _imageView.frame = frame;

    _imageView.transform = savedTransform;

}





按照我的要求给出不报错的代码,VS2019,PCL版本为1.10.1,给出完整代码,这些代码用于测量行李箱的长宽高,误差要在1cm以内,尽可能优化它,双目摄像头垂直在行李箱上方160cm左右,行李箱位于传送带上,在我给出的代码上进行二次开发 main: #include "gvCamera.h" #include <iostream> int main(int argc, char* argv[]) { std::cout << " **************************************** " << std::endl; std::cout << " ************** GV_D100 ***************** " << std::endl; std::cout << " **************************************** " << std::endl; std::shared_ptr<GvCamera> gvCamera = std::make_shared<GvCamera>(); bool irSwich = false; bool depthSwitch = true; bool rgbSwitch = true; bool alignDepthSwitch = false; gvCamera->setIRSwitch(irSwich); gvCamera->setDepthSwitch(depthSwitch); gvCamera->setRGBSwitch(rgbSwitch); if (alignDepthSwitch && depthSwitch) gvCamera->setDepthAlignSwitch(alignDepthSwitch); gvCamera->Preview3DCamera(); std::vector<float> calibData = gvCamera->getCalibrationParams(); std::cout << "Calibration Params : " << calibData[0] << " " << calibData[1] << " " << calibData[2] << " " << calibData[3] << std::endl; cv::Mat leftCameraMatrix = gvCamera->getLeftCameraMatrix(); std::cout << "leftCameraMatrix : " << leftCameraMatrix << std::endl; cv::Mat leftDistCoeffs = gvCamera->getLeftDistCoeffs(); std::cout << "leftDistCoeffs : " << leftDistCoeffs << std::endl; cv::Mat rightCameraMatrix = gvCamera->getRightCameraMatrix(); std::cout << "rightCameraMatrix : " << rightCameraMatrix << std::endl; cv::Mat rightDistCoeffs = gvCamera->getRightDistCoeffs(); std::cout << "rightDistCoeffs : " << rightDistCoeffs << std::endl; cv::Mat stereoRotation = gvCamera->getStereoRotation(); std::cout << "stereoRotation : " << stereoRotation << std::endl; cv::Mat stereoTranslation = gvCamera->getStereoTranslation(); std::cout << "stereoTranslation : " << stereoTranslation << std::endl; cv::Mat rgbCammeraMatrix = gvCamera->getRgbCameraMatrix(); std::cout << "rgbCameraMatrix : " << rgbCammeraMatrix << std::endl; cv::Mat rgbDistCoeffs = gvCamera->getRgbDistCoeffs(); std::cout << "rgbDistCoeffs : " << 
rgbDistCoeffs << std::endl; int waitTime = 0; while (true) { waitTime++; if (gvCamera->getIRSwitch()) { cv::Mat leftIr = gvCamera->getLeftIRImage(); cv::Mat rightIr = gvCamera->getRightIRImage(); if (!leftIr.empty()) cv::imshow("leftIr", leftIr); if (!rightIr.empty()) cv::imshow("rightIr", rightIr); } if (gvCamera->getDepthSwitch()) { cv::Mat depth = gvCamera->getDepthImage(); if (!depth.empty()) { //cv::imshow("depth", depth); cv::Mat nmt, colored; cv::convertScaleAbs(depth, nmt, 0.1); cv::applyColorMap(nmt, colored, cv::COLORMAP_JET); cv::imshow("depth ", colored); } } if (gvCamera->getRGBSwitch()) { cv::Mat rgb = gvCamera->getRgbImage(); if (!rgb.empty()) cv::imshow("rgb", rgb); if (waitTime > 5) { if (gvCamera->getDepthSwitch() && gvCamera->getRGBSwitch() && gvCamera->getDepthAlignSwitch()) { // 获取单通道的深度图 cv::Mat alignDepth = gvCamera->getAlignDepthImage(); if (!alignDepth.empty()) { cv::imshow("alignDepth", alignDepth); } cv::Mat alignColorDepth = gvCamera->getAlignColorDepthImage(); if (!alignColorDepth.empty()) cv::imshow("alignColorDepth", alignColorDepth); // 获取三通道的深度图 cv::Mat alignPointCloud = gvCamera->getAlinePointCloud(); if (!alignPointCloud.empty()) cv::imshow("alignPointCloud", alignPointCloud); } } } cv::waitKey(10); } return 0; } gvcamera.h: #include <dshow.h> #include "qedit.h" #include <vector> #include <functional> #include <Windows.h> #include <string> #include <iostream> #include <comutil.h> #include <atlstr.h> #include <opencv.hpp> #include <highgui.hpp> #include <iostream> #include "camera.hpp" #include <condition_variable> #include "VideoCapture.h" #include "SendDataByUSB.h" #include "depth2color.h" #pragma comment(lib, "strmiids") #pragma comment(lib, "comsupp.lib") #define IMAGE_W 640 #define IMAGE_H 400 class GvCamera { public: GvCamera(); ~GvCamera(); void Preview3DCamera(); void Stop3DCamera(); void initialVideo(); void processFrame(const unsigned char* buff, int len, VideoDevice* device); //void depth2RgbPointCloud(const cv::Mat& 
depthImage, const cv::Mat& color, const cv::Mat& cameraMatix, pcl::PointCloud<pcl::PointXYZRGB>::Ptr& pointcloud); //void depth2PointCloud(const cv::Mat& depthImage, const cv::Mat& cameraMatix, pcl::PointCloud<pcl::PointXYZ>::Ptr& pointcloud); //void ReadWriteConfig(bool bRead, bool bOpenRGB = false); bool ReadCameraCalibrationFactor(); bool ReadIR2IRCalibrationParams(); bool ReadIR2RGBCalibrationParams(); inline void setIRSwitch(bool showIR) { bShowIR = showIR; } inline bool getIRSwitch() { return bShowIR; } inline void setRGBSwitch(bool showRGB) { bShowRGB = showRGB; } inline bool getRGBSwitch() { return bShowRGB; }; inline void setDepthSwitch(bool showDepth) { bShowDepth = showDepth; } inline bool getDepthSwitch() { return bShowDepth; }; inline void setDepthAlignSwitch(bool showDepthAlign) { bShowDepthAlign = showDepthAlign; } inline bool getDepthAlignSwitch() { return bShowDepthAlign; }; //getImage inline cv::Mat getLeftIRImage() { std::lock_guard<std::mutex> lock(dataLock_); return leftIRImage_; }; inline cv::Mat getRightIRImage() { std::lock_guard<std::mutex> lock(dataLock_); return rightIRImage_; }; inline cv::Mat getDepthImage() { std::lock_guard<std::mutex> lock(dataLock_); return depthImage_; }; inline cv::Mat getAlignDepthImage() { std::lock_guard<std::mutex> lock(dataLock_); return alignDepthImage_; } inline cv::Mat getAlignColorDepthImage() { std::lock_guard<std::mutex> lock(dataLock_); return alignColorDepthImage_; } inline cv::Mat getRgbImage() { std::lock_guard<std::mutex> lock(dataLock_); return rgbImage_; }; inline cv::Mat getAlinePointCloud() { std::lock_guard<std::mutex> lock(dataLock_); return alignPointCloudImage_; } //get Depth params inline std::vector<float> getCalibrationParams() { return calibParams_; }; //get IR params inline cv::Mat getLeftCameraMatrix() { return leftCameraMatrix_; }; inline cv::Mat getLeftDistCoeffs() { return leftDistCoeffs_; }; inline cv::Mat getRightCameraMatrix() { return rightCameraMatrix_; }; inline cv::Mat 
getRightDistCoeffs() { return rightDistCoeffs_; }; inline cv::Mat getStereoRotation() { return stereoRotation_; }; inline cv::Mat getStereoTranslation() { return stereoTranslation_; }; //get rgb params inline cv::Mat getRgbCameraMatrix() { return rgbCameraMatrix_; }; inline cv::Mat getRgbDistCoeffs() { return rgbDistCoeffs_; }; private: Camera* m_pCamera; VideoCapture_HC* m_videoCapture; std::vector<float> calibParams_; bool m_bCloseAll = true; bool arr_bCloseWindows[4] = { false }; bool b_Find_Camera_Dev = true; bool bShowRGB; bool bShowIR; bool bShowDepth; bool bShowDepthAlign; bool dataReady_; // Image cv::Mat leftIRImage_; cv::Mat rightIRImage_; cv::Mat depthImage_; cv::Mat rgbImage_; cv::Mat alignDepthImage_; cv::Mat alignColorDepthImage_; cv::Mat alignPointCloudImage_; // params cv::Mat leftCameraMatrix_; cv::Mat leftDistCoeffs_; cv::Mat rightCameraMatrix_; cv::Mat rightDistCoeffs_; cv::Mat stereoRotation_; cv::Mat stereoTranslation_; cv::Mat rgbCameraMatrix_; cv::Mat rgbDistCoeffs_; //pcl::PointCloud<pcl::PointXYZRGB>::Ptr pointcloud_; //int pcdStartTime_; std::mutex cameraClock_; std::condition_variable cameraCond_; int nCurrent = 0x7f; CString strCameraSN; std::mutex dataLock_; gv::DepthAlignToColor depth2Color_; gv::DepthAlignToColor::Parameters d2cParams_; };
07-02
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值