//
// CustomCameraView.swift
// CheckInCamera
//
// Created by csld on 2025/7/17.
//
import UIKit
import AVFoundation
import CoreVideo
import VideoToolbox
// UIImage extension for compositing a watermark onto a video frame
extension UIImage {
func addVideoWatermark(_ watermarkView: UIView, targetFrame: CGRect) -> UIImage? {
// Render the UIView into a UIImage
let watermarkImage = watermarkView.convertToImage()
// Begin the image context at scale 1.0: `size` is already in output pixels,
// so using UIScreen.main.scale here would inflate the pixel dimensions and
// mismatch the writer's pixel buffers (and waste memory on every frame)
UIGraphicsBeginImageContextWithOptions(size, true, 1.0)
defer { UIGraphicsEndImageContext() }
// Draw the original frame
draw(in: CGRect(origin: .zero, size: size))
// Compute the watermark's size and position within the target frame
let watermarkSize = watermarkImage.size
let targetSize = targetFrame.size
// Base scale factors from watermark space to target space
let baseScaleX = targetSize.width / watermarkSize.width
let baseScaleY = targetSize.height / watermarkSize.height
let scaleX = baseScaleX * 2.2 // horizontal magnification (empirically tuned)
let scaleY = baseScaleY * 2.2 // vertical magnification (empirically tuned)
// Scaled watermark size
let scaledSize = CGSize(
width: watermarkSize.width * scaleX,
height: watermarkSize.height * scaleY
)
// Center the watermark within the target frame (original logic)
var x = targetFrame.origin.x + (targetFrame.size.width - scaledSize.width) / 2
var y = targetFrame.origin.y + (targetFrame.size.height - scaledSize.height) / 2
// Key fix: invert the Y axis (the video frame's Y axis is flipped relative
// to the preview layer) by measuring the position from the bottom
y = size.height - y - scaledSize.height
// Clamp the watermark inside the video frame
x = max(0, min(x, size.width - scaledSize.width))
y = max(0, min(y, size.height - scaledSize.height))
// Draw the watermark
watermarkImage.draw(in: CGRect(x: x, y: y, width: scaledSize.width, height: scaledSize.height))
// Return the composited image
return UIGraphicsGetImageFromCurrentImageContext()
}
}
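// A minimal usage sketch of the compositor above. The names and frame values
// are hypothetical, assuming `frameImage` is one video frame in pixel space
// and the target rect was already converted from preview coordinates; call it
// on the main thread since it builds a UIView:
func exampleStampWatermark(on frameImage: UIImage) -> UIImage? {
let badgeView = UILabel()
badgeView.text = "2025/7/17 09:41"
badgeView.sizeToFit()
// Illustrative rect near the bottom-left of a 1080x1920 frame
let targetInVideoSpace = CGRect(x: 40, y: 1600, width: 400, height: 120)
return frameImage.addVideoWatermark(badgeView, targetFrame: targetInVideoSpace)
}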
extension UIView {
func convertToImage() -> UIImage {
// Rendering the layer is UI work and must happen on the main thread
let render: () -> UIImage = {
// Create the image context at the device's scale factor
UIGraphicsBeginImageContextWithOptions(self.bounds.size, self.isOpaque, UIScreen.main.scale)
defer { UIGraphicsEndImageContext() }
guard let context = UIGraphicsGetCurrentContext() else { return UIImage() }
self.layer.render(in: context)
return UIGraphicsGetImageFromCurrentImageContext() ?? UIImage()
}
// Calling DispatchQueue.main.sync from the main thread deadlocks,
// so only hop queues when we are actually on a background thread
if Thread.isMainThread {
return render()
}
return DispatchQueue.main.sync(execute: render)
}
}
// Camera authorization status
enum CameraAuthorizationStatus {
case authorized
case denied
case notDetermined
case restricted
}
// Camera error types
enum CameraError: Error {
case captureSessionAlreadyRunning
case captureSessionIsMissing
case inputsAreInvalid
case invalidOperation
case noCameraAvailable
case torchUnavailable
case unknown
}
// Camera delegate protocol
protocol CameraViewDelegate: AnyObject {
func cameraView(_ cameraView: CustomCameraView, didCapturePhoto photo: UIImage)
func cameraView(_ cameraView: CustomCameraView, didFailWithError error: Error)
func cameraViewDidChangeAuthorizationStatus(_ cameraView: CustomCameraView, status: CameraAuthorizationStatus)
// Video recording state callbacks
func cameraViewDidStartRecording(_ cameraView: CustomCameraView)
func cameraViewDidStopRecording(_ cameraView: CustomCameraView, videoURL: URL?)
}
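// A minimal conformance sketch showing the callback shape; the class name is
// hypothetical and the bodies just log:
final class SampleCameraDelegate: CameraViewDelegate {
func cameraView(_ cameraView: CustomCameraView, didCapturePhoto photo: UIImage) {
print("Captured photo: \(photo.size)")
}
func cameraView(_ cameraView: CustomCameraView, didFailWithError error: Error) {
print("Camera error: \(error)")
}
func cameraViewDidChangeAuthorizationStatus(_ cameraView: CustomCameraView, status: CameraAuthorizationStatus) {
print("Authorization changed: \(status)")
}
func cameraViewDidStartRecording(_ cameraView: CustomCameraView) {
print("Recording started")
}
func cameraViewDidStopRecording(_ cameraView: CustomCameraView, videoURL: URL?) {
print("Recording stopped: \(videoURL?.lastPathComponent ?? "no file")")
}
}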
class CustomCameraView: UIView {
// MARK: - Properties
weak var delegate: CameraViewDelegate?
// Capture session objects
private var captureSession: AVCaptureSession?
private var videoDeviceInput: AVCaptureDeviceInput?
private var photoOutput: AVCapturePhotoOutput?
private var videoDataOutput: AVCaptureVideoDataOutput?
private var previewLayer: AVCaptureVideoPreviewLayer?
// Queue for video frame processing
private let videoDataOutputQueue = DispatchQueue(label: "com.example.videoDataOutput")
// Video recording state
private var assetWriter: AVAssetWriter?
private var assetWriterInput: AVAssetWriterInput?
private var assetWriterInputPixelBufferAdaptor: AVAssetWriterInputPixelBufferAdaptor?
private var isRecording = false
private var recordingStartTime: CMTime?
private var videoOutputURL: URL?
// Most recent sample buffer, kept to read the video dimensions
private var lastSampleBuffer: CMSampleBuffer?
// Watermark position
var watermarkPosition: CGPoint?
// Dedicated serial queue for AVAssetWriter operations
private let recordingQueue = DispatchQueue(label: "com.CheckInCamera.recordingQueue")
// Current camera settings
private var currentCameraPosition: AVCaptureDevice.Position = .back
private var currentFlashMode: AVCaptureDevice.FlashMode = .auto
/// Current zoom factor
private var currentZoomFactor: CGFloat = 1.0
private let minZoomFactor: CGFloat = 1.0
private let maxZoomFactor: CGFloat = 5.0
private var isTorchOn = false
private var processedImageSize: CGSize?
// 1. Pre-rendered watermark (rendered only once)
private var watermarkImage: CGImage?
// Shared CIContext for frame conversion, hoisted out of
// convertSampleBufferToUIImage: building a new context on every frame is
// expensive and a common cause of stuttering recordings
private let ciContext = CIContext(options: [
.useSoftwareRenderer: false,
.highQualityDownsample: true,
.workingColorSpace: CGColorSpaceCreateDeviceRGB()
])
// View lifecycle
override init(frame: CGRect) {
super.init(frame: frame)
setupView()
}
required init?(coder: NSCoder) {
super.init(coder: coder)
setupView()
}
deinit {
stopSession()
}
// MARK: - Setup
private func setupView() {
// Configure the preview layer
previewLayer = AVCaptureVideoPreviewLayer()
previewLayer?.videoGravity = .resizeAspectFill
previewLayer?.masksToBounds = true
layer.addSublayer(previewLayer!)
// Check camera authorization
checkCameraAuthorization()
}
override func layoutSubviews() {
super.layoutSubviews()
previewLayer?.frame = bounds
}
// MARK: - Camera authorization
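// Note: the app's Info.plist must declare NSCameraUsageDescription, or the
// process is terminated the first time the camera is accessed.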
func checkCameraAuthorization() {
switch AVCaptureDevice.authorizationStatus(for: .video) {
case .authorized:
delegate?.cameraViewDidChangeAuthorizationStatus(self, status: .authorized)
setupCaptureSession()
case .notDetermined:
delegate?.cameraViewDidChangeAuthorizationStatus(self, status: .notDetermined)
requestCameraAccess()
// Report restricted separately instead of folding it into denied
case .denied:
delegate?.cameraViewDidChangeAuthorizationStatus(self, status: .denied)
case .restricted:
delegate?.cameraViewDidChangeAuthorizationStatus(self, status: .restricted)
@unknown default:
delegate?.cameraViewDidChangeAuthorizationStatus(self, status: .denied)
}
}
private func requestCameraAccess() {
AVCaptureDevice.requestAccess(for: .video) { [weak self] granted in
DispatchQueue.main.async {
// Avoid force-unwrapping self inside the escaping closure
guard let self = self else { return }
if granted {
self.delegate?.cameraViewDidChangeAuthorizationStatus(self, status: .authorized)
self.setupCaptureSession()
} else {
self.delegate?.cameraViewDidChangeAuthorizationStatus(self, status: .denied)
}
}
}
}
// MARK: - Capture session setup
private func setupCaptureSession() {
captureSession = AVCaptureSession()
captureSession?.sessionPreset = .hd1920x1080
// Configure inputs and outputs
do {
try setupCameraInputs()
setupPhotoOutput()
setupVideoDataOutput() // add the video data output
} catch {
delegate?.cameraView(self, didFailWithError: error)
}
// Start the session
startSession()
}
private func setupCameraInputs() throws {
guard let captureSession = captureSession else { throw CameraError.captureSessionIsMissing }
// Look up the camera device
let cameraDevice = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: currentCameraPosition)
guard let device = cameraDevice else { throw CameraError.noCameraAvailable }
// Create the device input
let deviceInput = try AVCaptureDeviceInput(device: device)
// Add the input if the session accepts it
if captureSession.canAddInput(deviceInput) {
captureSession.addInput(deviceInput)
videoDeviceInput = deviceInput
} else {
throw CameraError.inputsAreInvalid
}
// Attach the session to the preview layer
previewLayer?.session = captureSession
}
private func setupPhotoOutput() {
guard let captureSession = captureSession else { return }
photoOutput = AVCapturePhotoOutput()
photoOutput?.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])], completionHandler: nil)
if captureSession.canAddOutput(photoOutput!) {
captureSession.addOutput(photoOutput!)
}
}
// Configure the video data output
private func setupVideoDataOutput() {
guard let captureSession = captureSession else { return }
videoDataOutput = AVCaptureVideoDataOutput()
// Deliver frames as 32BGRA
videoDataOutput?.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA)]
// Always discard late frames to keep latency low
videoDataOutput?.alwaysDiscardsLateVideoFrames = true
// Set the delegate and callback queue
videoDataOutput?.setSampleBufferDelegate(self, queue: videoDataOutputQueue)
if captureSession.canAddOutput(videoDataOutput!) {
captureSession.addOutput(videoDataOutput!)
}
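// A hedged addition (not in the original flow): pinning the connection to
// portrait keeps delivered buffers upright, so the orientation mapping in
// convertSampleBufferToUIImage stays on its portrait fast path
if let connection = videoDataOutput?.connection(with: .video),
connection.isVideoOrientationSupported {
connection.videoOrientation = .portrait
}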
}
// MARK: - Video recording
/// Start recording video
func startRecording() {
if isRecording {
delegate?.cameraView(self, didFailWithError: CameraError.invalidOperation)
return
}
do {
// Build the output file URL
let documentsDirectory = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first!
let dateFormatter = DateFormatter()
dateFormatter.dateFormat = "yyyyMMddHHmmss"
let fileName = "video_\(dateFormatter.string(from: Date())).mp4"
videoOutputURL = documentsDirectory.appendingPathComponent(fileName)
// Remove any file already at that path
if FileManager.default.fileExists(atPath: videoOutputURL!.path) {
try FileManager.default.removeItem(at: videoOutputURL!)
}
// Create the AVAssetWriter
assetWriter = try AVAssetWriter(outputURL: videoOutputURL!, fileType: .mp4)
var (videoWidth, videoHeight) = getPortraitResolution()
// Configure the video dimensions with the correct aspect ratio.
// Note: the original guards returned from startRecording on invalid sizes,
// which silently aborted recording with a half-configured writer;
// fall back to the default dimensions instead.
if let processedSize = processedImageSize,
processedSize.width > 0, processedSize.height > 0 {
// Use the processed image's aspect ratio, computed entirely in CGFloat
// to avoid precision loss from integer conversion
let aspectRatio = processedSize.width / processedSize.height
let calculatedHeight = CGFloat(videoWidth) / aspectRatio
if calculatedHeight.isFinite, calculatedHeight > 0 {
videoHeight = max(Int(calculatedHeight), 1)
print("Using processed-image aspect ratio: \(videoWidth)x\(videoHeight)")
} else {
print("Invalid aspect ratio, keeping default size: \(videoWidth)x\(videoHeight)")
}
} else if let sampleBuffer = lastSampleBuffer, let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) {
// Use the captured frame's aspect ratio. Divide in CGFloat: the original
// `videoWidth / Int(aspectRatio)` truncated the ratio (to zero for
// portrait frames), which crashes with a division by zero
let bufferWidth = CGFloat(CVPixelBufferGetWidth(imageBuffer))
let bufferHeight = CGFloat(CVPixelBufferGetHeight(imageBuffer))
let aspectRatio = bufferWidth / bufferHeight
videoHeight = max(Int(CGFloat(videoWidth) / aspectRatio), 1)
print("Using capture aspect ratio: \(videoWidth)x\(videoHeight)")
}
let bitsPerPixel: Float = 0.5 // raise or lower to trade quality for file size
let frameRate: Int = 30
let bitsPerSecond = Int(Float(videoWidth * videoHeight * frameRate) * bitsPerPixel)
// Output settings. The width/height must match the pixel buffers that are
// appended: hardcoding 1280x720 while appending portrait frames makes
// .resizeAspectFill crop the picture, which is why only part of the frame
// (and no watermark) showed up in the recording.
let videoSettings: [String: Any] = [
AVVideoCodecKey: AVVideoCodecType.h264,
AVVideoWidthKey: videoWidth,
AVVideoHeightKey: videoHeight,
AVVideoScalingModeKey: AVVideoScalingModeResizeAspectFill,
AVVideoCompressionPropertiesKey: [
AVVideoAverageBitRateKey: bitsPerSecond, // derived above instead of a fixed 20 Mbps
AVVideoProfileLevelKey: AVVideoProfileLevelH264HighAutoLevel,
AVVideoMaxKeyFrameIntervalKey: frameRate, // one keyframe per second at 30 fps
AVVideoAllowFrameReorderingKey: true,
AVVideoExpectedSourceFrameRateKey: frameRate,
AVVideoMaxKeyFrameIntervalDurationKey: 1 // max keyframe interval in seconds
// AVVideoQualityKey removed: it applies to JPEG encoding, not H.264
]
]
assetWriterInput = AVAssetWriterInput(mediaType: .video, outputSettings: videoSettings)
assetWriterInput?.expectsMediaDataInRealTime = true
// Create the pixel buffer adaptor
let sourcePixelBufferAttributes: [String: Any] = [
kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA),
kCVPixelBufferWidthKey as String: videoWidth,
kCVPixelBufferHeightKey as String: videoHeight,
kCVPixelBufferIOSurfacePropertiesKey as String: [:] // enable IOSurface backing
]
assetWriterInputPixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(
assetWriterInput: assetWriterInput!,
sourcePixelBufferAttributes: sourcePixelBufferAttributes
)
// Attach the input
if assetWriter?.canAdd(assetWriterInput!) ?? false {
assetWriter?.add(assetWriterInput!)
}
// Start writing; the session itself starts with the first frame's timestamp
assetWriter?.startWriting()
isRecording = true
recordingStartTime = nil // set from the first frame
// Notify the delegate
DispatchQueue.main.async {
self.delegate?.cameraViewDidStartRecording(self)
}
} catch {
delegate?.cameraView(self, didFailWithError: error)
isRecording = false
assetWriter = nil
assetWriterInput = nil
assetWriterInputPixelBufferAdaptor = nil
}
}
/// Stop recording video
func stopRecording() {
recordingQueue.async { [weak self] in
guard let self = self, self.isRecording else { return }
self.isRecording = false
guard let assetWriter = self.assetWriter,
let assetWriterInput = self.assetWriterInput else {
self.handleRecordingError(nil)
return
}
// Mark the input as finished
assetWriterInput.markAsFinished()
// Finish writing and save the video
assetWriter.finishWriting { [weak self] in
guard let self = self else { return }
if assetWriter.status == .completed {
// Optionally save to the photo library
// (requires NSPhotoLibraryAddUsageDescription in Info.plist)
if let videoURL = self.videoOutputURL {
UISaveVideoAtPathToSavedPhotosAlbum(
videoURL.path,
nil,
nil,
nil
)
}
// Report the file URL to the delegate
DispatchQueue.main.async {
self.delegate?.cameraViewDidStopRecording(self, videoURL: self.videoOutputURL)
}
} else {
self.handleRecordingError(assetWriter.error)
}
// Release writer resources
self.assetWriter = nil
self.assetWriterInput = nil
self.assetWriterInputPixelBufferAdaptor = nil
self.recordingStartTime = nil
}
}
}
/// Pick a portrait resolution, preferring the largest portrait format the device reports.
/// Note: device formats are sensor-native (landscape), so in practice this
/// usually falls through to the 1080x1920 default.
func getPortraitResolution() -> (width: Int, height: Int) {
let session = AVCaptureDevice.DiscoverySession(
deviceTypes: [.builtInWideAngleCamera],
mediaType: .video,
position: .back
)
guard let device = session.devices.first else {
return (1080, 1920)
}
var bestResolution: CMVideoDimensions?
var bestPixelCount = 0
// Iterate directly without building an intermediate array
for format in device.formats {
let dimensions = CMVideoFormatDescriptionGetDimensions(format.formatDescription)
// Skip landscape dimensions
guard dimensions.height > dimensions.width else { continue }
// Pixel count
let pixelCount = Int(dimensions.width) * Int(dimensions.height)
// Track the best resolution so far
if pixelCount > bestPixelCount {
bestPixelCount = pixelCount
bestResolution = dimensions
}
}
if let best = bestResolution {
return (Int(best.width), Int(best.height))
}
// Fallback
return (1080, 1920)
}
// Recording error handling
private func handleRecordingError(_ error: Error?) {
isRecording = false
assetWriter = nil
assetWriterInput = nil
assetWriterInputPixelBufferAdaptor = nil
recordingStartTime = nil
DispatchQueue.main.async {
self.delegate?.cameraView(self, didFailWithError: error ?? CameraError.unknown)
}
}
// MARK: - Camera control
func startSession() {
guard let captureSession = captureSession, !captureSession.isRunning else { return }
DispatchQueue.global(qos: .userInitiated).async {
captureSession.startRunning()
}
}
func stopSession() {
guard let captureSession = captureSession, captureSession.isRunning else { return }
DispatchQueue.global(qos: .userInitiated).async {
captureSession.stopRunning()
}
}
private let cameraQueue = DispatchQueue(label: "com.example.camera")
/// Switch between the front and back cameras
func switchCamera() {
guard let captureSession = captureSession else { return }
cameraQueue.async { [weak self] in
guard let self = self else { return }
self.currentCameraPosition = (self.currentCameraPosition == .back) ? .front : .back
// Reconfigure atomically; stopSession() is asynchronous, so stopping and
// then immediately removing inputs races with the running session
captureSession.beginConfiguration()
captureSession.inputs.forEach { captureSession.removeInput($0) }
do {
try self.setupCameraInputs()
captureSession.commitConfiguration()
} catch {
// Roll back to the previous camera on failure
self.currentCameraPosition = (self.currentCameraPosition == .back) ? .front : .back
try? self.setupCameraInputs()
captureSession.commitConfiguration()
DispatchQueue.main.async {
self.delegate?.cameraView(self, didFailWithError: error)
}
}
DispatchQueue.main.async {
self.previewLayer?.session = captureSession
self.previewLayer?.frame = self.bounds
}
}
}
/// Set the flash mode
func setFlashMode(_ flashMode: AVCaptureDevice.FlashMode) {
currentFlashMode = flashMode
}
/// Capture a still photo
func capturePhoto() {
guard let photoOutput = photoOutput else { return }
let settings = AVCapturePhotoSettings()
settings.flashMode = currentFlashMode
photoOutput.capturePhoto(with: settings, delegate: self)
}
// MARK: - Torch control
func toggleTorch() {
// Prefer the active capture device so the torch follows the current camera
guard let device = videoDeviceInput?.device ?? AVCaptureDevice.default(for: .video),
device.hasTorch else {
delegate?.cameraView(self, didFailWithError: CameraError.noCameraAvailable)
return
}
do {
try device.lockForConfiguration()
defer { device.unlockForConfiguration() }
if device.isTorchAvailable {
if device.torchMode == .off {
try device.setTorchModeOn(level: AVCaptureDevice.maxAvailableTorchLevel)
isTorchOn = true
} else {
device.torchMode = .off
isTorchOn = false
}
}
} catch {
delegate?.cameraView(self, didFailWithError: error)
}
}
/// Watermark view
var watermarkView: UIView?
/// Set the zoom factor
func setZoom(scale: CGFloat) {
// Zoom the active camera, not the system default device
guard let device = videoDeviceInput?.device else { return }
do {
try device.lockForConfiguration()
defer { device.unlockForConfiguration() }
// Clamp to both our limits and the hardware maximum;
// exceeding videoMaxZoomFactor raises an exception
let effectiveScale = min(max(scale, minZoomFactor), min(maxZoomFactor, device.activeFormat.videoMaxZoomFactor))
currentZoomFactor = effectiveScale
device.videoZoomFactor = effectiveScale
} catch {
delegate?.cameraView(self, didFailWithError: error)
}
}
func setTorch(on: Bool) {
guard on != isTorchOn else { return }
toggleTorch()
}
}
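// A minimal wiring sketch (hypothetical helper; assumes the caller conforms to
// CameraViewDelegate and supplies the watermark view):
func exampleEmbedCamera(in container: UIView, delegate: CameraViewDelegate, badge: UIView) -> CustomCameraView {
let cameraView = CustomCameraView(frame: container.bounds)
cameraView.delegate = delegate
cameraView.watermarkView = badge // rendered into every recorded frame
container.addSubview(cameraView)
// Recording is started later, e.g. from a record button:
// cameraView.startRecording() ... cameraView.stopRecording()
return cameraView
}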
// MARK: - AVCapturePhotoCaptureDelegate
extension CustomCameraView: AVCapturePhotoCaptureDelegate {
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
if let error = error {
delegate?.cameraView(self, didFailWithError: error)
return
}
guard let imageData = photo.fileDataRepresentation() else {
delegate?.cameraView(self, didFailWithError: CameraError.unknown)
return
}
if let image = UIImage(data: imageData) {
delegate?.cameraView(self, didCapturePhoto: image)
} else {
delegate?.cameraView(self, didFailWithError: CameraError.unknown)
}
}
}
// MARK: - AVCaptureVideoDataOutputSampleBufferDelegate
extension CustomCameraView: AVCaptureVideoDataOutputSampleBufferDelegate {
// Convert a rect from preview-layer coordinates to video-output coordinates
private func convertPreviewFrameToVideoFrame(previewFrame: CGRect) -> CGRect {
guard let videoWidth = assetWriterInput?.outputSettings?[AVVideoWidthKey] as? CGFloat,
let videoHeight = assetWriterInput?.outputSettings?[AVVideoHeightKey] as? CGFloat else {
return previewFrame
}
// Preview layer size (the current view's size)
let previewSize = bounds.size
// Scale factors from preview space to video space.
// Note: this plain scaling ignores the cropping that .resizeAspectFill
// applies to the preview; see the metadata-rect variant below.
let scaleX = videoWidth / previewSize.width
let scaleY = videoHeight / previewSize.height
// Scale the origin and size
return CGRect(
x: previewFrame.origin.x * scaleX,
y: previewFrame.origin.y * scaleY,
width: previewFrame.width * scaleX,
height: previewFrame.height * scaleY
)
}
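// A hedged alternative (not in the original flow): under .resizeAspectFill the
// preview layer crops the frame, so plain width/height scaling drifts. The
// preview layer's metadataOutputRectConverted(fromLayerRect:) accounts for the
// video gravity; a sketch, with the method name below being hypothetical:
private func convertPreviewFrameViaMetadataRect(_ previewFrame: CGRect, videoSize: CGSize) -> CGRect {
guard let previewLayer = previewLayer else { return previewFrame }
// Normalized (0...1) rect in the capture output's coordinate space
let normalized = previewLayer.metadataOutputRectConverted(fromLayerRect: previewFrame)
return CGRect(
x: normalized.origin.x * videoSize.width,
y: normalized.origin.y * videoSize.height,
width: normalized.size.width * videoSize.width,
height: normalized.size.height * videoSize.height
)
}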
// Convert a CMSampleBuffer to a UIImage, correcting for orientation
func convertSampleBufferToUIImage(_ sampleBuffer: CMSampleBuffer) -> UIImage? {
guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
return nil
}
// 1. Create a CIImage
var ciImage = CIImage(cvPixelBuffer: imageBuffer)
// 2. Prefer the video connection's orientation
var videoOrientation: AVCaptureVideoOrientation = .portrait
if let connection = videoDataOutput?.connection(with: .video),
connection.isVideoOrientationSupported {
videoOrientation = connection.videoOrientation
} else {
// Fall back to the device orientation when the connection has none
switch UIDevice.current.orientation {
case .portrait: videoOrientation = .portrait
case .portraitUpsideDown: videoOrientation = .portraitUpsideDown
case .landscapeLeft: videoOrientation = .landscapeRight
case .landscapeRight: videoOrientation = .landscapeLeft
default: videoOrientation = .portrait
}
}
// 3. Map to an EXIF orientation
let orientation: CGImagePropertyOrientation
switch (videoOrientation, currentCameraPosition) {
case (.portrait, .back):
orientation = .up
case (.portrait, .front):
orientation = .upMirrored
case (.portraitUpsideDown, .back):
orientation = .down
case (.portraitUpsideDown, .front):
orientation = .downMirrored
case (.landscapeRight, .back):
orientation = .right
case (.landscapeRight, .front):
orientation = .rightMirrored
case (.landscapeLeft, .back):
orientation = .left
case (.landscapeLeft, .front):
orientation = .leftMirrored
default:
orientation = .up
}
// Apply the orientation transform
ciImage = ciImage.oriented(forExifOrientation: Int32(orientation.rawValue))
// 4. The front camera may need an extra horizontal flip
if currentCameraPosition == .front {
ciImage = ciImage.transformed(by: CGAffineTransform(scaleX: -1, y: 1))
}
// 5. Render to a UIImage through the shared context; the original built a
// new CIContext on every frame, which is expensive and a major cause of
// stuttering recordings
guard let cgImage = ciContext.createCGImage(ciImage, from: ciImage.extent) else {
return nil
}
// Wrap in a UIImage with a fixed .up orientation
let image = UIImage(cgImage: cgImage, scale: 1.0, orientation: .up)
return image
}
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
guard isRecording else { return }
// Keep the most recent sample buffer
self.lastSampleBuffer = sampleBuffer
// Watermark compositing
guard let image = self.convertSampleBufferToUIImage(sampleBuffer),
let watermarkView = self.watermarkView else { return }
self.processedImageSize = image.size
var targetFrame = CGRect.zero
// Read the watermark's frame on the main thread (UIKit access)
DispatchQueue.main.sync {
targetFrame = convertPreviewFrameToVideoFrame(previewFrame: watermarkView.frame)
}
guard let watermarkedImage = image.addVideoWatermark(watermarkView, targetFrame: targetFrame) else { return }
guard let pixelBuffer = self.imageToCVPixelBuffer(image: watermarkedImage) else {
return
}
recordingQueue.async { [weak self] in
guard let self = self,
self.isRecording,
let assetWriter = self.assetWriter,
let assetWriterInput = self.assetWriterInput,
let adaptor = self.assetWriterInputPixelBufferAdaptor else { return }
let timestamp = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
// Start the writer session at the first frame's timestamp (key fix)
if self.recordingStartTime == nil {
self.recordingStartTime = timestamp
assetWriter.startSession(atSourceTime: timestamp)
}
// Ensure the writer is active and the input can accept data
guard assetWriter.status == .writing,
assetWriterInput.isReadyForMoreMediaData else {
print("录制状态异常: \(assetWriter.status.rawValue), 输入准备状态: \(assetWriterInput.isReadyForMoreMediaData)")
return
}
// Log the pixel buffer dimensions (per-frame logging; disable outside debugging)
let bufferWidth = CVPixelBufferGetWidth(pixelBuffer)
let bufferHeight = CVPixelBufferGetHeight(pixelBuffer)
print("Pixel buffer size: \(bufferWidth)x\(bufferHeight)")
// Append the frame
if !adaptor.append(pixelBuffer, withPresentationTime: timestamp) {
print("追加帧失败: \(assetWriter.error?.localizedDescription ?? "未知错误")")
}
}
}
// UIImage -> CVPixelBuffer (fixes color and distortion issues)
func imageToCVPixelBuffer(image: UIImage) -> CVPixelBuffer? {
let width = Int(image.size.width)
let height = Int(image.size.height)
let attrs = [
kCVPixelBufferCGImageCompatibilityKey: kCFBooleanTrue,
kCVPixelBufferCGBitmapContextCompatibilityKey: kCFBooleanTrue,
kCVPixelBufferIOSurfacePropertiesKey: [:] as CFDictionary
] as CFDictionary
var pixelBuffer: CVPixelBuffer?
let status = CVPixelBufferCreate(
kCFAllocatorDefault,
width,
height,
kCVPixelFormatType_32BGRA,
attrs,
&pixelBuffer
)
guard status == kCVReturnSuccess, let buffer = pixelBuffer else {
print("创建像素缓冲区失败,状态: \(status)")
return nil
}
CVPixelBufferLockBaseAddress(buffer, [])
defer { CVPixelBufferUnlockBaseAddress(buffer, []) }
// Draw the image data directly into the pixel buffer's memory
guard let context = CGContext(
data: CVPixelBufferGetBaseAddress(buffer),
width: width,
height: height,
bitsPerComponent: 8,
bytesPerRow: CVPixelBufferGetBytesPerRow(buffer),
space: CGColorSpaceCreateDeviceRGB(),
bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue | CGBitmapInfo.byteOrder32Little.rawValue
), let cgImage = image.cgImage else {
return nil
}
context.draw(cgImage, in: CGRect(x: 0, y: 0, width: width, height: height))
return buffer
}
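// A hedged performance note: creating a fresh CVPixelBuffer for every frame is
// one source of the playback stutter. Once writing has started, the adaptor
// exposes a pixelBufferPool; a sketch (hypothetical helper name) of renting a
// recycled buffer from it instead:
func pooledPixelBuffer() -> CVPixelBuffer? {
guard let pool = assetWriterInputPixelBufferAdaptor?.pixelBufferPool else { return nil }
var pixelBuffer: CVPixelBuffer?
// Buffers come back sized per sourcePixelBufferAttributes and are reused
let status = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pool, &pixelBuffer)
return status == kCVReturnSuccess ? pixelBuffer : nil
}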
}
extension UIImage {
// Create a UIImage from a CVPixelBuffer
convenience init?(pixelBuffer: CVPixelBuffer) {
// Lock the pixel buffer
CVPixelBufferLockBaseAddress(pixelBuffer, .readOnly)
defer { CVPixelBufferUnlockBaseAddress(pixelBuffer, .readOnly) }
// Read the buffer geometry
let width = CVPixelBufferGetWidth(pixelBuffer)
let height = CVPixelBufferGetHeight(pixelBuffer)
let baseAddress = CVPixelBufferGetBaseAddress(pixelBuffer)
let bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer)
// Build a CGImage over the buffer's memory
let colorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapInfo = CGBitmapInfo(
rawValue: CGBitmapInfo.byteOrder32Little.rawValue |
CGImageAlphaInfo.premultipliedFirst.rawValue
)
guard let context = CGContext(
data: baseAddress,
width: width,
height: height,
bitsPerComponent: 8,
bytesPerRow: bytesPerRow,
space: colorSpace,
bitmapInfo: bitmapInfo.rawValue
), let cgImage = context.makeImage() else {
return nil
}
// Initialize the UIImage
self.init(cgImage: cgImage)
}
}
Help me analyze this:
let videoSettings: [String: Any] = [
AVVideoCodecKey: AVVideoCodecType.h264,
AVVideoWidthKey: 1280,
AVVideoHeightKey: 720,
AVVideoScalingModeKey: AVVideoScalingModeResizeAspectFill,
AVVideoCompressionPropertiesKey: [
AVVideoAverageBitRateKey: 20_000_000, // set for high quality (comment originally said 12 Mbps)
AVVideoProfileLevelKey: AVVideoProfileLevelH264HighAutoLevel,
AVVideoMaxKeyFrameIntervalKey: frameRate, // one keyframe per second at 30 fps, not every two seconds
AVVideoAllowFrameReorderingKey: true,
AVVideoExpectedSourceFrameRateKey: frameRate,
AVVideoQualityKey: 0.9, // quality setting (0.0-1.0)
AVVideoMaxKeyFrameIntervalDurationKey: 1, // max keyframe interval in seconds
]
]
With these settings the picture is sharp, but the recorded video stutters during playback, the watermark I added doesn't show up, and only part of the frame is visible.