1054 The Dominant Color (20 point(s))

Behind the scenes in the computer's memory, color is always talked about as a series of 24 bits of information for each pixel. In an image, the color with the largest proportional area is called the dominant color. A strictly dominant color takes more than half of the total area. Now given an image of resolution M by N (for example, 800x600), you are supposed to point out the strictly dominant color.

Input Specification:

Each input file contains one test case. For each case, the first line contains 2 positive numbers: M (<=800) and N (<=600) which are the resolutions of the image. Then N lines follow, each contains M digital colors in the range [0, 2^24). It is guaranteed that the strictly dominant color exists for each input image. All the numbers in a line are separated by a space.

Output Specification:

For each test case, simply print the dominant color in a line.

Sample Input:

5 3
0 0 255 16777215 24
24 24 0 0 24
24 0 24 24 24

Sample Output:

24

Approach: since the strictly dominant color covers more than half of all pixels, we can use a 1:1 cancellation idea (the Boyer-Moore majority vote). Keep one candidate color and a counter; a pixel of the same color adds a vote, a pixel of any other color cancels one, and when the counter hits zero the next pixel becomes the new candidate. Because the dominant color outnumbers all other colors combined, it is the candidate left standing at the end.
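As an illustration, running this cancellation on the sample image (read row by row): the two 0 pixels raise the counter to 2, then 255 and 16777215 cancel it back to 0, so the next pixel makes 24 the new candidate; from that point on the counter never drops back to 0, and 24 is the value left at the end, matching the sample output.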

code

#include <cstdio>

int main() {
	int m, n, x;
	scanf("%d %d", &m, &n);
	// Boyer-Moore majority vote: the strictly dominant color covers more than
	// half of the M*N pixels, so it always survives the 1:1 cancellation.
	int color = 0, colornum = 0;
	for (int i = 0; i < n; ++i) {
		for (int j = 0; j < m; ++j) {
			scanf("%d", &x);
			if (colornum == 0) {   // no candidate left, adopt the current pixel
				color = x;
			}
			if (color == x) {      // same color as the candidate: one more vote
				colornum++;
			} else {               // different color: cancel one vote
				colornum--;
			}
		}
	}
	printf("%d\n", color);
	return 0;
}
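For comparison, here is a minimal counting sketch (an alternative to the voting trick, not the solution above): with at most 800 x 600 = 480,000 pixels, keeping a frequency table and reporting the color whose count exceeds half of M*N is also well within the limits.

#include <cstdio>
#include <unordered_map>

int main() {
	int m, n, x;
	scanf("%d %d", &m, &n);
	std::unordered_map<int, int> cnt;      // color value -> pixel count
	int half = m * n / 2, answer = 0;
	for (int i = 0; i < m * n; ++i) {
		scanf("%d", &x);
		if (++cnt[x] > half) answer = x;   // strictly more than half of the area
	}
	printf("%d\n", answer);
	return 0;
}

The voting version uses O(1) extra memory, while this one stores one counter per distinct color; both run in a single pass over the input.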

 
