CMakeList.txt中设置一个可变的变量的值(bool)

本文介绍如何在CMakeLists.txt中根据构建类型设置一个布尔变量DEBUG_OFF:在Debug模式下将其设为OFF,在Release等其他模式下设为ON;并展示了如何把该变量作为编译宏传入代码,用于条件编译。

摘要生成于 C知道 ,由 DeepSeek-R1 满血版支持, 前往体验 >

在CMakeLists.txt中有个bool变量,在Debug模式下需要设置为OFF,在其他模式(Release、thread、leak)下设置为ON,即需要在CMakeLists.txt中根据构建类型为该变量设置不同的值,CMakeLists.txt中增加的代码如下:

IF(CMAKE_BUILD_TYPE STREQUAL "DEBUG")
  SET(DEBUG_OFF OFF)
ELSE()
  SET(DEBUG_OFF ON)

## 这个是打印DEBUG_OFF的值用的
MESSAGE("DEBUG_OFF=" ${LIBRARY_PATH})

通过ADD_DEFINITIONS(-DDEBUG_OFF)把该变量传给编译器之后,就可以在代码中使用该宏:DEBUG_OFF,例如:

#include "a.h"
int sqrt(int x) {
	long t = x;
	
	while (t*t > x) {
		t = (t + x/t) / 2;
	}
	
	#ifndef DEBUG_OFF
		if (t > INT_MIN && t <= INT_MAX) {
			
		} else {
			std::cout << "overflow int value range." << std::endl;
		}
	#endif
	
	return t;
}

int GetConvDetectionPoseResultInt8(int8_t **pBlob, std::vector<int> &qnt_zp, std::vector<float> &qnt_scale, std::vector<float> &DetectiontRects,std::vector<std::map<int, KeyPoint>> &keypoints) { static auto meshgrid = GenerateMeshgrid(); int ret = 0; int gridIndex = -2; float xmin = 0, ymin = 0, xmax = 0, ymax = 0; float cls_val = 0; float cls_max = 0; int cls_index = 0; int quant_zp_cls = 0, quant_zp_reg = 0, quant_zp_msk, quant_zp_seg = 0; float quant_scale_cls = 0, quant_scale_reg = 0, quant_scale_msk = 0, quant_scale_seg = 0; int quant_zp_pose = 0; float quant_scale_pose = 0; int model_class_num = class_num; std::vector<DetectRect> detectRects; for (int index = 0; index < headNum; index++) { int8_t *reg = (int8_t *)pBlob[index * 2 + 0]; int8_t *cls = (int8_t *)pBlob[index * 2 + 1]; int8_t *pose = (int8_t *)pBlob[index + headNum * 2]; quant_zp_reg = qnt_zp[index * 2 + 0]; quant_zp_cls = qnt_zp[index * 2 + 1]; quant_scale_reg = qnt_scale[index * 2 + 0]; quant_scale_cls = qnt_scale[index * 2 + 1]; quant_zp_pose = qnt_zp[index + headNum * 2]; quant_scale_pose = qnt_scale[index + headNum * 2]; for (int h = 0; h < mapSize[index][0]; h++) { for (int w = 0; w < mapSize[index][1]; w++) { gridIndex += 2; for (int cl = 0; cl < model_class_num; cl++) { cls_val = sigmoid( DeQnt2F32(cls[cl * mapSize[index][0] * mapSize[index][1] + h * mapSize[index][1] + w], quant_zp_cls, quant_scale_cls)); if (0 == cl) { cls_max = cls_val; cls_index = cl; } else { if (cls_val > cls_max) { cls_max = cls_val; cls_index = cl; } } } if (cls_max > objectThreshold) { DetectRect temp; xmin = (meshgrid[gridIndex + 0] - DeQnt2F32(reg[0 * mapSize[index][0] * mapSize[index][1] + h * mapSize[index][1] + w], quant_zp_reg, quant_scale_reg)) * strides[index]; ymin = (meshgrid[gridIndex + 1] - DeQnt2F32(reg[1 * mapSize[index][0] * mapSize[index][1] + h * mapSize[index][1] + w], quant_zp_reg, quant_scale_reg)) * strides[index]; xmax = (meshgrid[gridIndex + 0] + DeQnt2F32(reg[2 * mapSize[index][0] * 
mapSize[index][1] + h * mapSize[index][1] + w], quant_zp_reg, quant_scale_reg)) * strides[index]; ymax = (meshgrid[gridIndex + 1] + DeQnt2F32(reg[3 * mapSize[index][0] * mapSize[index][1] + h * mapSize[index][1] + w], quant_zp_reg, quant_scale_reg)) * strides[index]; xmin = xmin > 0 ? xmin : 0; ymin = ymin > 0 ? ymin : 0; xmax = xmax < input_w ? xmax : input_w; ymax = ymax < input_h ? ymax : input_h; if (xmin >= 0 && ymin >= 0 && xmax <= input_w && ymax <= input_h) { temp.xmin = xmin / input_w; temp.ymin = ymin / input_h; temp.xmax = xmax / input_w; temp.ymax = ymax / input_h; temp.classId = cls_index; temp.score = cls_max; for (int kc = 0; kc < keypoint_num; kc++) { KeyPoint kp; kp.x = (DeQnt2F32(pose[(kc * 3 + 0) * mapSize[index][0] * mapSize[index][1] + h * mapSize[index][1] + w], quant_zp_pose, quant_scale_pose) * 2 + (meshgrid[gridIndex + 0] - 0.5)) * strides[index] / input_w; kp.y = (DeQnt2F32(pose[(kc * 3 + 1) * mapSize[index][0] * mapSize[index][1] + h * mapSize[index][1] + w], quant_zp_pose, quant_scale_pose) * 2 + (meshgrid[gridIndex + 1] - 0.5)) * strides[index] / input_h; kp.score = sigmoid(DeQnt2F32(pose[(kc * 3 + 2) * mapSize[index][0] * mapSize[index][1] + h * mapSize[index][1] + w], quant_zp_pose, quant_scale_pose)); kp.id = kc; temp.keyPoints.push_back(kp); } detectRects.push_back(temp); } } } } } std::sort(detectRects.begin(), detectRects.end(), [](DetectRect &Rect1, DetectRect &Rect2) -> bool { return (Rect1.score > Rect2.score); }); NN_LOG_DEBUG("NMS Before num :%ld", detectRects.size()); for (int i = 0; i < detectRects.size(); ++i) { float xmin1 = detectRects[i].xmin; float ymin1 = detectRects[i].ymin; float xmax1 = detectRects[i].xmax; float ymax1 = detectRects[i].ymax; int classId = detectRects[i].classId; float score = detectRects[i].score; if (classId != -1) { // 将检测结果按照classId、score、xmin1、ymin1、xmax1、ymax1 的格式存放在vector<float>中 DetectiontRects.push_back(float(classId)); DetectiontRects.push_back(float(score)); 
DetectiontRects.push_back(float(xmin1)); DetectiontRects.push_back(float(ymin1)); DetectiontRects.push_back(float(xmax1)); DetectiontRects.push_back(float(ymax1)); std::map<int, KeyPoint> kps; for (int kn = 0; kn < keypoint_num; kn++) { kps.insert({kn, detectRects[i].keyPoints[kn]}); } keypoints.push_back(kps); for (int j = i + 1; j < detectRects.size(); ++j) { float xmin2 = detectRects[j].xmin; float ymin2 = detectRects[j].ymin; float xmax2 = detectRects[j].xmax; float ymax2 = detectRects[j].ymax; float iou = IOU(xmin1, ymin1, xmax1, ymax1, xmin2, ymin2, xmax2, ymax2); if (iou > nmsThreshold) { detectRects[j].classId = -1; } } } } return ret; } 将下面的python代码严格转成c++代码集成的上面int GetConvDetectionPoseResultInt8()方法中,并使用cmake构建项目 crop_ROI = [] for i, connection in enumerate(skeleton): start_idx, end_idx = connection if (confidences[start_idx] > 0.3 and confidences[end_idx] > 0.3): start_point = tuple(map(int, keypoints[start_idx])) end_point = tuple(map(int, keypoints[end_idx])) if start_point == (0, 0) or end_point == (0, 0): continue if (start_idx==5 and end_idx==6): crop_ROI.append(start_point) crop_ROI.append(end_point) if (start_idx==11 and end_idx==12): crop_ROI.append(end_point) crop_ROI.append(start_point) cv2.line(frame, start_point, end_point, (255, 0, 0), 2) # print('crop_ROI: ', crop_ROI) if len(crop_ROI) < 4: continue ROI = crop_perspective(frame, crop_ROI) # ROI = crop_mask(image, crop_ROI) ROI_resize = cv2.resize(ROI.copy(), (320, 320)) # print(ROI.shape, "ROI_resize.shape: ", ROI_resize.shape, temp_img.shape) si_list = color_similarity(ROI_resize, temp_img) # si_list = compare_color_histograms(ROI_resize, temp_img) # print(si_list) cv2.rectangle(frame, crop_ROI[0], crop_ROI[2], (0, 255, 0), 2) cv2.putText(frame, f"si:{round(float(si_list[0]), 3)}", (crop_ROI[1][0], crop_ROI[1][1]), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 0), 3) cv2.imshow('ROI', ROI_resize) #相似度保存 similarity_list.append(similarity) return similarity_list # 关键点连接关系定义(COCO格式) skeleton = [ 
(0, 1), # 鼻子 → 左眼 (0, 2), # 鼻子 → 右眼 (1, 3), # 左眼 → 左耳 (2, 4), # 右眼 → 右耳 (0, 5), # 鼻子 → 左肩 (0, 6), # 鼻子 → 右肩 (5, 6), # 左肩 → 右肩(躯干上部) (5, 7), # 左肩 → 左肘 (7, 9), # 左肘 → 左手腕 (6, 8), # 右肩 → 右肘 (8, 10), # 右肘 → 右手腕 (5, 11), # 左肩 → 左髋 (6, 12), # 右肩 → 右髋 (11, 12), # 左髋 → 右髋(躯干下部) (11, 13), # 左髋 → 左膝盖 (13, 15), # 左膝盖 → 左脚踝 (12, 14), # 右髋 → 右膝盖 (14, 16) # 右膝盖 → 右脚踝 ] def color_similarity(img1, img2): #0~1: 1表示完全相似 0表示完全不相似 similarity_list = [] # 读取图像并转换为HSV img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2HSV) img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2HSV) # 计算平均HSV avg_hsv1 = np.mean(img1, axis=(0,1)) avg_hsv2 = np.mean(img2, axis=(0,1)) # 归一化处理(H:0-180°→0-1, S/V:0-255→0-1) norm_hsv1 = [avg_hsv1[0]/180, avg_hsv1[1]/255, avg_hsv1[2]/255] norm_hsv2 = [avg_hsv2[0]/180, avg_hsv2[1]/255, avg_hsv2[2]/255] # 计算三维空间距离 delta = np.array(norm_hsv1) - np.array(norm_hsv2) distance = np.linalg.norm(delta) similarity = 1 - min(distance, 1.0) #相似度保存 similarity_list.append(similarity) return similarity_list
最新发布
07-01
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值