plot_arr.cpp

  name="google_ads_frame" marginwidth="0" marginheight="0" src="http://pagead2.googlesyndication.com/pagead/ads?client=ca-pub-5572165936844014&dt=1194442938015&lmt=1194190197&format=336x280_as&output=html&correlator=1194442937843&url=file%3A%2F%2F%2FC%3A%2FDocuments%2520and%2520Settings%2Flhh1%2F%E6%A1%8C%E9%9D%A2%2FCLanguage.htm&color_bg=FFFFFF&color_text=000000&color_link=000000&color_url=FFFFFF&color_border=FFFFFF&ad_type=text&ga_vid=583001034.1194442938&ga_sid=1194442938&ga_hid=1942779085&flash=9&u_h=768&u_w=1024&u_ah=740&u_aw=1024&u_cd=32&u_tz=480&u_java=true" frameborder="0" width="336" scrolling="no" height="280" allowtransparency="allowtransparency"> #include <iostream.h>
#include <strstrea.h>

const int size = 5;

class plot
{
   int x, y;
 public:
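   // Clamp the coordinates to the range [0, size].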
   plot(int i, int j)
    {
      if(i>size)
         i = size;
      if(i<0)
         i = 0;
      if(j>size)
         j = size;
      if(j<0)
         j=0;
      x=i;
      y=j;
    }
   friend ostream &operator<<(ostream &stream, plot obj);
};

// operator<< renders the plot: rows from y = size down to 0, with a '*' on the
// row matching obj.y (indented 2*obj.x spaces), then x-axis labels 0..size.
ostream &operator<<(ostream &stream, plot obj)
 {
   int i, j;

   for(j=size; j>=0; j--)
    {
      stream << j;
      if(j==obj.y)
       {
         for(i=0; i<obj.x; i++)
            stream << "  ";
         stream << '*';
       }
      stream << endl;
    }
   for(i=0; i<=size; i++)
      stream << " " << i;
   stream << endl;
   return stream;
 }

int main()
 {
   plot a(2,3), b(1,1);
   char str[200];

   cout << "Output using cout:" << endl;
   cout << a << endl << b << endl << endl;

   ostrstream outs(str, sizeof(str));
   outs << a << b << ends;
   cout << "output using in-RAM formatting:" << endl;
   cout << str;
   return 0;
 }
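
ostrstream and the <strstream> header are deprecated in standard C++. A minimal sketch of the same in-RAM formatting using std::ostringstream is shown below; it assumes the plot class and its operator<< from the listing above are already defined, and only main() changes.

#include <sstream>

int main()
 {
   plot a(2,3), b(1,1);

   cout << "Output using cout:" << endl;
   cout << a << endl << b << endl << endl;

   // ostringstream owns a growable buffer, so no fixed-size char array
   // and no explicit ends terminator are needed.
   ostringstream outs;
   outs << a << b;
   cout << "Output using in-RAM formatting:" << endl;
   cout << outs.str();
   return 0;
 }

Because ostringstream manages its own storage, the output is never cut short by a fixed 200-byte buffer the way the ostrstream version's could be for a larger plot.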

 
