import os
import cv2
import base64
import json
import time
import asyncio
import logging
import traceback
import uuid
import numpy as np
from channels.generic.websocket import AsyncWebsocketConsumer
from django.conf import settings
from ultralytics import YOLO
from index.page_4_1_auto_set import image_detect

logger = logging.getLogger(__name__)
class VideoDetectionConsumer(AsyncWebsocketConsumer):
    async def connect(self):
        logger.info(f"WebSocket 连接尝试: {self.scope}")
        await self.accept()
        logger.info("WebSocket 连接已建立")
        self.last_time = time.time()
        self.start_time = time.time()
        self.frame_count = 0
        self.total_processing_time = 0

    async def disconnect(self, close_code):
        pass

    async def receive(self, text_data=None, bytes_data=None):
        if text_data:
            text_data_json = json.loads(text_data)
            action = text_data_json.get('action')
            video_name = text_data_json.get('video')
            if action == 'start_detection':
                # Make sure the temporary directory exists
                temp_dir = os.path.join(settings.BASE_DIR, 'temp')
                if not os.path.exists(temp_dir):
                    os.makedirs(temp_dir)
                video_path = os.path.join(temp_dir, video_name)
                # Check that the video file exists
                if not os.path.exists(video_path):
                    await self.send(text_data=json.dumps({
                        'type': 'error',
                        'message': f'视频文件不存在: {video_path}'
                    }))
                    return
                model_path = os.path.join(settings.BASE_DIR, 'models', 'best.pt')
                output_video_path = os.path.join(settings.MEDIA_ROOT, 'videos', video_name)
                output_video_dir = os.path.dirname(output_video_path)
                if not os.path.exists(output_video_dir):
                    os.makedirs(output_video_dir)
                # Start the video-processing task
                asyncio.create_task(self.detect_objects_in_video(model_path, video_path, output_video_path))

    async def detect_objects_in_video(self, model_path, video_path, output_path):
        try:
            # Load the model
            model = YOLO(model_path)
            # Open the video
            cap = cv2.VideoCapture(video_path)
            if not cap.isOpened():
                await self.send(text_data=json.dumps({
                    'type': 'error',
                    'message': f'无法打开视频文件: {video_path}'
                }))
                return
            total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            fps = cap.get(cv2.CAP_PROP_FPS)
            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            # Create the video writer
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
            frame_index = 0
            while cap.isOpened():
                ret, frame = cap.read()
                if not ret:
                    break
                # Process the frame
                frame_index += 1
                start_time = time.time()
                # Object detection
                results = model(frame)
                annotated_frame = results[0].plot()
                # Measure processing time
                processing_time = time.time() - start_time
                self.total_processing_time += processing_time
                self.frame_count += 1
                # Current FPS for this frame
                current_fps = 1.0 / processing_time if processing_time > 0 else 0
                # Draw the FPS overlay
                fps_text = f"FPS: {current_fps:.2f}"
                cv2.putText(annotated_frame, fps_text, (10, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
                # Write the annotated frame to the output video
                out.write(annotated_frame)
                # Encode the annotated frame as base64
                _, buffer = cv2.imencode('.jpg', annotated_frame)
                frame_base64 = base64.b64encode(buffer).decode('utf-8')
                # Progress
                progress = frame_index / total_frames
                # Send the annotated frame to the client
                await self.send(text_data=json.dumps({
                    'type': 'frame',
                    'frame': frame_base64,
                    'objects': len(results[0].boxes),
                    'fps': current_fps,
                    'progress': progress
                }))
                # Small delay to throttle the send rate
                await asyncio.sleep(0.01)
            # Release resources
            cap.release()
            out.release()
            # Average FPS over the whole video
            avg_fps = self.frame_count / self.total_processing_time if self.total_processing_time > 0 else 0
            # Send the completion message
            output_video_url = f'{settings.MEDIA_URL}videos/{os.path.basename(output_path)}'
            await self.send(text_data=json.dumps({
                'type': 'end',
                'output_video_url': output_video_url,
                'total_frames': total_frames,
                'avg_fps': avg_fps,
                'fps': fps
            }))
        except Exception as e:
            await self.send(text_data=json.dumps({
                'type': 'error',
                'message': f'处理错误: {str(e)}'
            }))
            traceback.print_exc()

class VideoProcessingConsumer(AsyncWebsocketConsumer):
    async def connect(self):
        logger.info("WebSocket 连接已建立")
        await self.accept()
        self.processing = False
        self.video_path = None
        self.config = None
        self.temp_video_path = None
        self.cap = None
        self.last_update_time = time.time()

    async def disconnect(self, close_code):
        logger.info("WebSocket 连接已关闭")
        self.processing = False
        # Release resources
        if self.cap:
            self.cap.release()
            logger.info("视频捕获资源已释放")
        # Remove the temporary file
        if self.temp_video_path and os.path.exists(self.temp_video_path):
            try:
                os.remove(self.temp_video_path)
                logger.info(f"已删除临时文件: {self.temp_video_path}")
            except Exception as e:
                logger.error(f"删除临时文件失败: {str(e)}")

    async def receive(self, text_data):
        try:
            data = json.loads(text_data)
            command = data.get('command')
            if command == 'start_processing':
                # Stop any processing that is already running
                self.processing = False
                # Read the parameters
                video_filename = data.get('video_path')
                self.config = data.get('config')
                if not video_filename or not self.config:
                    await self.send_error('缺少必要参数: video_path 或 config')
                    return
                # Create the temporary directory (shared with the upload step)
                temp_dir = os.path.join(settings.BASE_DIR, 'temp')
                if not os.path.exists(temp_dir):
                    os.makedirs(temp_dir)
                    logger.info(f"创建临时目录: {temp_dir}")
                # Build the video file path
                self.temp_video_path = os.path.join(temp_dir, video_filename)
                logger.info(f"视频文件路径: {self.temp_video_path}")
                # Check that the video file exists
                if not os.path.exists(self.temp_video_path):
                    await self.send_error(f'视频文件不存在: {self.temp_video_path}')
                    return
                # Validate the video format
                if not self.is_valid_video_file(self.temp_video_path):
                    await self.send_error(f'无效的视频格式: {self.temp_video_path}')
                    return
                # Start processing
                self.processing = True
                self.last_update_time = time.time()
                asyncio.create_task(self.process_video())
            elif command == 'stop_processing':
                self.processing = False
                await self.send_status('处理已停止')
        except Exception as e:
            logger.error(f"接收消息错误: {str(e)}")
            await self.send_error(f'处理错误: {str(e)}')

    def is_valid_video_file(self, file_path):
        """Check whether the file can be opened as a valid video."""
        try:
            cap = cv2.VideoCapture(file_path)
            if not cap.isOpened():
                return False
            # Check frame count and dimensions
            frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            cap.release()
            # Basic sanity checks
            if frame_count <= 0 or width <= 0 or height <= 0:
                return False
            return True
        except Exception:
            return False

    async def send_error(self, message):
        await self.send(text_data=json.dumps({
            'type': 'error',
            'message': message
        }))

    async def send_status(self, message):
        await self.send(text_data=json.dumps({
            'type': 'status',
            'message': message
        }))

    async def send_progress(self, frame_count, total_frames, fps):
        """Send processing progress to the client."""
        progress = int((frame_count / total_frames) * 100)
        await self.send(text_data=json.dumps({
            'type': 'progress',
            'progress': progress,
            'processed_frames': frame_count,
            'fps': fps
        }))

    async def process_video(self):
        """Process video frames asynchronously and stream the results."""
        try:
            # Open the video file
            self.cap = cv2.VideoCapture(self.temp_video_path)
            if not self.cap.isOpened():
                await self.send_error(f"无法打开视频文件: {self.temp_video_path}")
                return
            # Read the video metadata
            total_frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
            fps = self.cap.get(cv2.CAP_PROP_FPS)
            width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            logger.info(f"视频信息: {total_frames}帧, {fps} FPS, {width}x{height}")
            # Send the initial info message
            await self.send(text_data=json.dumps({
                'type': 'info',
                'total_frames': total_frames,
                'fps': fps
            }))
            frame_count = 0
            while self.processing and self.cap.isOpened():
                ret, frame = self.cap.read()
                if not ret:
                    break
                # Process the current frame
                processed_frame, _ = image_detect(
                    frame,
                    self.config['roi'],
                    currentback=self.config.get('currentback', 0),
                    kernal_erode=self.config.get('kernal_erode', 1),
                    kernal_dilate=self.config.get('kernal_dilate', 1),
                    kernal_erode_2=self.config.get('kernal_erode_2', 1),
                    min_area=self.config.get('min_area', 1),
                    max_area=self.config.get('max_area', 10000),
                    adjust_threshold=self.config.get('adjust_threshold', 150),
                    feature=self.config.get('feature', 1)
                )
                # Encode as base64
                _, buffer = cv2.imencode('.jpg', processed_frame)
                frame_base64 = base64.b64encode(buffer).decode('utf-8')
                # Send the processed frame
                await self.send(text_data=json.dumps({
                    'type': 'frame',
                    'frame': frame_base64,
                    'frame_count': frame_count
                }))
                frame_count += 1
                # Send progress updates at most once per second
                current_time = time.time()
                if current_time - self.last_update_time > 1.0:
                    await self.send_progress(frame_count, total_frames, fps)
                    self.last_update_time = current_time
            # Processing finished
            if self.processing:
                # Send the final progress
                await self.send_progress(frame_count, total_frames, fps)
                # Build the final result image
                _, final_buffer = cv2.imencode('.jpg', processed_frame)
                final_image = base64.b64encode(final_buffer).decode('utf-8')
                await self.send(text_data=json.dumps({
                    'type': 'end',
                    'result_image': final_image,
                    'processed_frames': frame_count,
                    'total_frames': total_frames
                }))
        except Exception as e:
            logger.error(f"视频处理错误: {str(e)}")
            await self.send_error(f'视频处理错误: {str(e)}')
        finally:
            self.processing = False
            if self.cap:
                self.cap.release()
                self.cap = None

Reported error: 处理错误: 视频文件不存在: C:\Users\16660\Desktop\网页搭建\Behaviewer\temp\1.mp4 (processing error: the video file does not exist at that path).
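
Both consumers resolve the uploaded file as os.path.join(settings.BASE_DIR, 'temp', video_name), so the error means nothing named 1.mp4 was present under BASE_DIR/temp when start_processing was sent, or the file had already been removed (the disconnect handler above deletes self.temp_video_path). A minimal sketch of an upload view that writes into the same directory the consumers read from is shown below; the view name, URL wiring, and response shape are assumptions for illustration, not part of the original project.

# views.py - hypothetical upload endpoint; it only illustrates that the file
# must land in the same settings.BASE_DIR / 'temp' directory the consumers use.
import os
from django.conf import settings
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt

@csrf_exempt  # assumption: CSRF is handled elsewhere; tighten this in production
def upload_video(request):
    if request.method != 'POST' or 'video' not in request.FILES:
        return JsonResponse({'error': 'no video uploaded'}, status=400)
    upload = request.FILES['video']
    # Same directory the consumers read from: BASE_DIR/temp/<filename>
    temp_dir = os.path.join(settings.BASE_DIR, 'temp')
    os.makedirs(temp_dir, exist_ok=True)
    dest_path = os.path.join(temp_dir, upload.name)
    # Write the uploaded file in chunks
    with open(dest_path, 'wb') as f:
        for chunk in upload.chunks():
            f.write(chunk)
    # Return the filename the frontend should pass back over the WebSocket
    # as 'video' (VideoDetectionConsumer) or 'video_path' (VideoProcessingConsumer).
    return JsonResponse({'video': upload.name})

A quick diagnostic is to log os.listdir(temp_dir) right before the os.path.exists check in receive; if 1.mp4 is not listed there, the upload step and the consumers are not using the same directory.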