ViewPagerAndroid has been extracted from react-native core

This post explains how to deal with the warning about ViewPagerAndroid being removed from the react-native core library. Installing and linking @react-native-community/viewpager, then swapping it in for ViewPagerAndroid inside third-party components such as react-native-tab-view, resolves the issue.

The warning is plain enough: ViewPagerAndroid is about to be removed from the react-native core package.

Fixing it is just as simple: install @react-native-community/viewpager and link it, exactly as the warning suggests.
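For reference, a minimal sketch of that step, assuming npm and a React Native version below 0.60 (on 0.60+ autolinking applies, so the manual link step is unnecessary):

npm install @react-native-community/viewpager
react-native link @react-native-community/viewpager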

I never use ViewPagerAndroid in my own code, so clearly some third-party component must be importing it.

In my case, I had recently added the react-native-tab-view component, so that is where I looked.

Getting to the root of it: look for PagerAndroid.
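If you are not sure where the offending import lives, searching node_modules for the core component name will surface it (a sketch assuming a Unix-like shell; widen the path to all of node_modules if you don't yet suspect a particular package):

grep -rn "ViewPagerAndroid" node_modules/react-native-tab-view/src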

Yes, that's the one. Change its imports to:

import * as React from 'react';
import { View, StyleSheet, I18nManager } from 'react-native';
import { PagerRendererPropType } from './PropTypes';
import type { PagerRendererProps } from './TypeDefinitions';
// ViewPagerAndroid now comes from the community package instead of 'react-native'
import ViewPagerAndroid from '@react-native-community/viewpager';

The only real change is that last line: ViewPagerAndroid is now imported from @react-native-community/viewpager rather than destructured from react-native itself.

Done. If you hit similar warnings in the future, the fix follows the same pattern.
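The same recipe applies to other components that have been extracted from core. AsyncStorage, for example, also moved out to a community package; a sketch of the equivalent change (assuming @react-native-community/async-storage has been installed and linked):

// Before: deprecated core import, triggers the same kind of warning
// import { AsyncStorage } from 'react-native';

// After: import from the extracted community package instead
import AsyncStorage from '@react-native-community/async-storage';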
