661. Image Smoother

This post presents an image smoothing algorithm that replaces each pixel's value with the average (rounded down) of the pixel itself and its up to eight surrounding pixels, producing a blur effect. It gives a concrete implementation and explains how pixels on the boundary are handled.


Given a 2D integer matrix M representing the gray scale of an image, you need to design a smoother to make the gray scale of each cell become the average gray scale (rounding down) of all the 8 surrounding cells and itself. If a cell has less than 8 surrounding cells, then use as many as you can.

Example 1:

Input:
[[1,1,1],
 [1,0,1],
 [1,1,1]]
Output:
[[0, 0, 0],
 [0, 0, 0],
 [0, 0, 0]]
Explanation:
For the points (0,0), (0,2), (2,0), (2,2): floor(3/4) = floor(0.75) = 0
For the points (0,1), (1,0), (1,2), (2,1): floor(5/6) = floor(0.83333333) = 0
For the point (1,1): floor(8/9) = floor(0.88888889) = 0

Note:

  1. The value in the given matrix is in the range of [0, 255].
  2. The length and width of the given matrix are in the range of [1, 150].

Meaning:

Given a 2D array, for each cell take the average of the cell and its eight neighbours; if some of those neighbours do not exist, average over only the ones that do.

Approach:

Be careful with the boundary checks and with initializing the neighbour count; a more compact offset-based variant is sketched after the code below.

Code:

class Solution {
    public int[][] imageSmoother(int[][] M) {
        int r = M.length;
        int c = M[0].length;
        int[][] arr = new int[r][c];

        for (int i = 0; i < r; i++) {
            for (int j = 0; j < c; j++) {
                // a[] holds the eight neighbours (0 if out of bounds);
                // ans counts the cell itself plus every neighbour that exists.
                int[] a = new int[8];
                int ans = 1;
                a[0] = (i - 1 >= 0 && j - 1 >= 0) ? M[i - 1][j - 1] : 0;
                ans = (i - 1 >= 0 && j - 1 >= 0) ? ans + 1 : ans;
                a[1] = (i - 1 >= 0) ? M[i - 1][j] : 0;
                ans = (i - 1 >= 0) ? ans + 1 : ans;
                a[2] = (j - 1 >= 0) ? M[i][j - 1] : 0;
                ans = (j - 1 >= 0) ? ans + 1 : ans;
                a[3] = (i + 1 < r && j - 1 >= 0) ? M[i + 1][j - 1] : 0;
                ans = (i + 1 < r && j - 1 >= 0) ? ans + 1 : ans;
                a[4] = (i + 1 < r) ? M[i + 1][j] : 0;
                ans = (i + 1 < r) ? ans + 1 : ans;
                a[5] = (i + 1 < r && j + 1 < c) ? M[i + 1][j + 1] : 0;
                ans = (i + 1 < r && j + 1 < c) ? ans + 1 : ans;
                a[6] = (j + 1 < c) ? M[i][j + 1] : 0;
                ans = (j + 1 < c) ? ans + 1 : ans;
                a[7] = (i - 1 >= 0 && j + 1 < c) ? M[i - 1][j + 1] : 0;
                ans = (i - 1 >= 0 && j + 1 < c) ? ans + 1 : ans;

                int sum = M[i][j] + a[0] + a[1] + a[2] + a[3] + a[4] + a[5] + a[6] + a[7];
                // integer division already rounds down for non-negative values
                arr[i][j] = sum / ans;
            }
        }
        return arr;
    }
}
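
For comparison, here is a minimal sketch of a more compact variant that walks the 3x3 window with row/column offsets instead of enumerating the eight neighbours by hand. It is illustrative only, not the original author's code; the class name SolutionCompact is made up for this sketch.

class SolutionCompact {
    public int[][] imageSmoother(int[][] M) {
        int r = M.length;
        int c = M[0].length;
        int[][] res = new int[r][c];
        for (int i = 0; i < r; i++) {
            for (int j = 0; j < c; j++) {
                int sum = 0, count = 0;
                // visit every cell of the 3x3 window centred on (i, j),
                // skipping offsets that fall outside the matrix
                for (int di = -1; di <= 1; di++) {
                    for (int dj = -1; dj <= 1; dj++) {
                        int ni = i + di, nj = j + dj;
                        if (ni >= 0 && ni < r && nj >= 0 && nj < c) {
                            sum += M[ni][nj];
                            count++;
                        }
                    }
                }
                // integer division rounds down for non-negative values
                res[i][j] = sum / count;
            }
        }
        return res;
    }
}

Running either version on Example 1, e.g. new SolutionCompact().imageSmoother(new int[][]{{1,1,1},{1,0,1},{1,1,1}}), should return the all-zero 3x3 matrix shown above.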
