Computing Contour Moments

This post walks through an OpenCV-based approach to contour analysis: image moments are computed for each contour and used to derive shape features such as area, centroid, perimeter, and a fitted ellipse (orientation, axis lengths, eccentricity), which are then printed and drawn onto the image.
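Before the full example, here is a minimal primer (a sketch added for illustration, not from the original post). It assumes a binary, single-channel cv::Mat named bin already exists; it shows that cv::moments fills a cv::Moments struct whose raw spatial moments give each contour's area (m00) and centroid (m10/m00, m01/m00):

#include "opencv2/imgproc/imgproc.hpp"
#include <vector>
#include <stdio.h>

// Print area and centroid of every external contour in a binary image.
// `bin` is a hypothetical binary input image (non-zero pixels are foreground).
void printCentroids(cv::Mat bin)
{
    cv::Mat work = bin.clone();   // findContours modifies its input, so work on a copy
    std::vector<std::vector<cv::Point> > contours;
    cv::findContours(work, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
    for (int i = 0; i < (int)contours.size(); i++)
    {
        cv::Moments m = cv::moments(contours[i]);
        if (m.m00 == 0)                      // degenerate contour: centroid undefined
            continue;
        double cx = m.m10 / m.m00;           // centroid x = m10 / m00
        double cy = m.m01 / m.m00;           // centroid y = m01 / m00
        printf("contour %d: area %.1f, centroid (%.1f, %.1f)\n", i, m.m00, cx, cy);
    }
}

The complete example below applies the same idea to every contour of an input image, and additionally fits an ellipse to obtain orientation, axis lengths and several derived ratios.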

#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/features2d/features2d.hpp"
#include <stdlib.h>
#include <stdio.h>
using namespace cv;
using namespace std;
// Compute image moments and derived contour features
void cacMoments(cv::Mat src)
{
    Mat srcGray;
    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    // Gaussian smoothing to suppress noise
    GaussianBlur( src, src, Size(3,3), 0.1, 0, BORDER_DEFAULT );
    // Convert to grayscale (imread loads images as BGR, so use CV_BGR2GRAY)
    cvtColor(src, srcGray, CV_BGR2GRAY);
    // Binarize: findContours expects a binary image (non-zero pixels are treated as foreground)
    threshold(srcGray, srcGray, 0, 255, THRESH_BINARY | THRESH_OTSU);
    // Contour detection
    findContours(srcGray, contours, hierarchy,
        CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
    // Optionally draw all contour boundaries
    // drawContours(src, contours, -1, Scalar(0, 0, 255));
    printf("Number of contours: %d\n", (int)contours.size());
    // Compute the moments of each contour
    vector<Moments> mu(contours.size());
    for(int i = 0; i < (int)contours.size(); i++)
    {
        mu[i] = moments(contours[i], false);
    }
    // Derive shape features from the moments
    for(int i = 0; i < (int)contours.size(); i++)
    {
        // Skip degenerate contours: the centroid needs m00 != 0
        if (mu[i].m00 == 0)
        {
            continue;
        }
        // Area, centroid and contour perimeter
        double area = mu[i].m00;
        int cx = (int)(mu[i].m10 / mu[i].m00);
        int cy = (int)(mu[i].m01 / mu[i].m00);
        double perimeter = arcLength(contours.at(i), true);
        // Ellipse fitting requires at least 5 contour points
        if ((int)contours.at(i).size() < 5)
        {
            continue;
        }
        else
        {
            RotatedRect rRect = fitEllipse(contours.at(i));
            double orientation = rRect.angle;
            double orientation_rads = orientation * CV_PI / 180;
            double majorAxis = rRect.size.height > rRect.size.width ? rRect.size.height : rRect.size.width;
            double minorAxis = rRect.size.height > rRect.size.width ? rRect.size.width : rRect.size.height;
            // Roundness, eccentricity, axis ratio and equivalent diameter
            double roundness = (perimeter * perimeter) / (4 * CV_PI * area);   // 1.0 for a perfect circle
            double eccentricity = sqrt(1 - pow(minorAxis / majorAxis, 2));
            double ratio = (minorAxis / majorAxis) * 100;
            double diameter = sqrt((4 * area) / CV_PI);
            // Print the derived features
            printf("Area: %.1f\n", area);
            printf("Perimeter: %.1f\n", perimeter);
            printf("Major Axis: %.1f\n", majorAxis);
            printf("Minor Axis: %.1f\n", minorAxis);
            printf("Orientation: %.1f\n", orientation);
            printf("Roundness: %.1f\n", roundness);
            printf("Eccentricity: %.1f\n", eccentricity);
            printf("Ratio: %.1f\n", ratio);
            printf("Diameter: %.1f\n", diameter);
            printf("\n");
            // Draw the fitted ellipse and the axis-aligned bounding box
            ellipse(src, rRect, Scalar(0, 255, 0));
            rectangle(src, boundingRect(contours.at(i)), Scalar(0, 0, 255));
            // Optional crosshair at the centroid
            //line(src, Point(cx-30, cy), Point(cx+30, cy), Scalar(0,0,255));
            //line(src, Point(cx, cy-30), Point(cx, cy+30), Scalar(0,0,255));
            // Orientation line starting at the centroid
            line(src, Point(cx, cy), Point((int)(cx + 30 * cos(orientation_rads)),
                (int)(cy + 30 * sin(orientation_rads))), Scalar(255, 0, 0), 1);
            // Label the orientation at the centroid
            char pChar[100];
            sprintf(pChar, "Ori: %.0f", orientation);
            putText(src, pChar, Point(cx, cy), FONT_HERSHEY_SIMPLEX, 0.4, Scalar(255));
        }
    }
    cv::imshow( "result", src);
}
int main(){
    cv::Mat srcImage = imread("22.jpg");
    if( !srcImage.data )
        return -1; 
    cv::imshow( "srcImage", srcImage );
    // Compute contour moments and features
    cacMoments(srcImage);  
    cv::waitKey(0);
    return(0);
}
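As a complementary sketch (added for illustration, not part of the original post), the orientation drawn above can also be obtained directly from the second-order central moments mu20, mu02 and mu11 of the same cv::Moments struct, and cv::HuMoments turns that struct into seven moment invariants that are unchanged under translation, scale and rotation:

#include "opencv2/imgproc/imgproc.hpp"
#include <vector>
#include <stdio.h>
#include <math.h>

// Orientation from central moments plus the seven Hu invariants.
// `contour` is assumed to be one entry of the `contours` vector used above.
void momentFeatures(const std::vector<cv::Point>& contour)
{
    cv::Moments mu = cv::moments(contour, false);
    if (mu.m00 == 0)
        return;                                          // degenerate contour
    // Principal-axis orientation: theta = 0.5 * atan2(2*mu11, mu20 - mu02)
    double theta = 0.5 * atan2(2.0 * mu.mu11, mu.mu20 - mu.mu02);
    printf("Orientation from moments: %.1f deg\n", theta * 180.0 / CV_PI);
    // Hu moments: shape descriptors derived from the normalized central moments
    double hu[7];
    cv::HuMoments(mu, hu);
    for (int k = 0; k < 7; k++)
        printf("Hu[%d] = %g\n", k, hu[k]);
}

Both angles describe the same principal axis, although the RotatedRect returned by fitEllipse reports its angle with a different sign and range convention than the math-convention theta above. The Hu invariants are the quantities cv::matchShapes uses to compare contours independently of pose.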



Reposted from: http://blog.youkuaiyun.com/zhuwei1988
