通过模仿 Boomerang 掌握摄像头编程接口

本文分享了使用Swift实现Instagram的Boomerang应用的过程。主要介绍了如何利用AVFoundation创建自定义拍摄界面,实现闪光灯控制及前后摄像头切换,并将多张图片合成为短视频的核心功能。

摘要生成于 C知道 ,由 DeepSeek-R1 满血版支持, 前往体验 >

Boomerang是Instagram推出的一款应用,具体介绍可到这: https://itunes.apple.com/cn/app/id1041596399

试用了一下,觉得蛮有意思的,于是自己也学着用Swift做了一个,虽然和原版比起来有差距,但基本功能都实现了,现在总结一下用到的几个知识点:

使用 AVFoundation 实现自定义拍摄界面

// MARK: - Capture pipeline state
// NOTE(review): these are implicitly-unwrapped and only valid after
// setupCamera(type:) has run — TODO confirm no access happens before that.
// Coordinates the flow of data from the camera input to the outputs.
private var session : AVCaptureSession!
// The physical camera currently in use (back camera by default).
private var device : AVCaptureDevice!
// Wraps `device` so it can be attached to `session`; replaced on camera flip.
private var deviceInput: AVCaptureDeviceInput!
// Delivers raw BGRA frames to the sample-buffer delegate.
private var videoDataOutput: AVCaptureVideoDataOutput!

/// Builds the capture pipeline: session + default video device + BGRA frame
/// output + full-bounds preview layer, then starts the session.
/// - Parameter type: the requested camera.
///   NOTE(review): `type` is currently never read — the default (back) camera
///   is always selected. TODO: choose the device matching `type`.
private func setupCamera(type: CameraType) {
    self.session = AVCaptureSession()
    self.device = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)

    // Bail out instead of crashing later: the original stored a `try?` result
    // into an implicitly-unwrapped property, which traps on first use when
    // input creation fails (no camera, permission denied, simulator).
    guard let device = self.device,
          let input = try? AVCaptureDeviceInput(device: device) else {
        return
    }
    self.deviceInput = input
    if self.session.canAddInput(self.deviceInput) {
        self.session.addInput(self.deviceInput)
    }

    self.videoDataOutput = AVCaptureVideoDataOutput()
    // BGRA frames so each sample buffer can be drawn directly via CoreImage.
    self.videoDataOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as AnyHashable : Int(kCVPixelFormatType_32BGRA)]
    if self.session.canAddOutput(self.videoDataOutput) {
        self.session.addOutput(self.videoDataOutput)
    }

    // Frames are delivered on a dedicated serial queue, off the main thread.
    let queue: DispatchQueue = DispatchQueue(label: "queue")
    self.videoDataOutput.setSampleBufferDelegate(self, queue: queue)

    // BUG FIX: avoid `videoLayer!` — unwrap once and skip the preview layer
    // entirely if it could not be created.
    if let videoLayer = AVCaptureVideoPreviewLayer(session: self.session) {
        videoLayer.frame = CGRect(x: 0.0, y: 0.0, width: self.frame.size.width, height: self.frame.size.height)
        videoLayer.videoGravity = AVLayerVideoGravityResizeAspectFill
        self.layer.addSublayer(videoLayer)
    }

    self.session.startRunning()
}

闪光灯是否一直打开

/// Toggles the camera torch (continuous light) on/off.
func torchSwitch() {
    if !RRCameraView.isCameraAvailable() {
        return
    }

    // BUG FIX: the original checked `hasFlash`, but this method configures
    // `torchMode`. Flash (still-photo strobe) and torch (continuous light)
    // are distinct capabilities — check the one we actually use.
    guard self.device.hasTorch else {
        // Device has no torch: nothing to toggle.
        return
    }

    // BUG FIX: the original swallowed the error in an empty `catch` and then
    // mutated torchMode WITHOUT holding the configuration lock, which
    // AVFoundation rejects. Abort if the lock cannot be acquired.
    do {
        try self.device.lockForConfiguration()
    }
    catch {
        print("lockForConfiguration failed: \(error)")
        return
    }

    switch self.device.torchMode {
        case .on:
            self.device.torchMode = .off
        case .off:
            self.device.torchMode = .on
        case .auto:
            // NOTE(review): assigning .auto to itself is a no-op, kept from
            // the original — confirm whether .auto should resolve to .on/.off.
            self.device.torchMode = .auto
    }

    self.device.unlockForConfiguration()
    // (Removed the redundant session.startRunning(): toggling the torch does
    // not stop the session, and startRunning on a running session is a no-op.)
}

前后摄像头切换

/// Switches between the front and back cameras by swapping the session input.
func flipSwitch() {
    self.session.stopRunning()
    self.session.beginConfiguration()

    // Remove every existing input; the replacement is added below.
    for input in self.session.inputs {
        self.session.removeInput(input as! AVCaptureInput)
    }

    // Flip to the opposite position of the current input.
    let position = (self.deviceInput?.device.position == .front) ? AVCaptureDevicePosition.back : .front

    for device in AVCaptureDevice.devices(withMediaType: AVMediaTypeVideo) {
        if let device = device as? AVCaptureDevice , device.position == position {
            // BUG FIX: the original added the `try?` result unconditionally,
            // passing nil to addInput if input creation failed, and skipped
            // the canAddInput check used elsewhere in this class.
            guard let input = try? AVCaptureDeviceInput(device: device) else {
                continue
            }
            self.deviceInput = input
            if self.session.canAddInput(input) {
                self.session.addInput(input)
            }
        }
    }

    self.session.commitConfiguration()
    // BUG FIX: was `self.startRunning()` — it is the session that must be
    // restarted (consistent with setupCamera), not the view.
    self.session.startRunning()
}

把摄像头数据转换成UIImage

/// Converts a captured video sample buffer into a UIImage.
/// - Parameter sampleBuffer: a video frame delivered by the data output.
/// - Returns: the rendered frame, or nil if the buffer holds no image data
///   or rendering fails.
func getImageFromData(sampleBuffer: CMSampleBuffer) -> UIImage? {
    // BUG FIX: a CMSampleBuffer is NOT a CVPixelBuffer; the original
    // `sampleBuffer as! CVPixelBuffer` crashes at runtime. The pixel buffer
    // must be extracted with CMSampleBufferGetImageBuffer.
    guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
        return nil
    }

    let ciImage = CIImage(cvPixelBuffer: pixelBuffer)
    let context = CIContext(options: nil)
    let rect = CGRect(x: 0, y: 0,
                      width: CVPixelBufferGetWidth(pixelBuffer),
                      height: CVPixelBufferGetHeight(pixelBuffer))

    // BUG FIX: avoid the force unwrap — createCGImage can return nil.
    guard let cgImage = context.createCGImage(ciImage, from: rect) else {
        return nil
    }
    return UIImage(cgImage: cgImage)
}

把拍摄的多张照片按顺序和倒序合并成一个短视频,这是这个应用的重点所在

/// Encodes `images` into an H.264 MP4 at `videoPath`, one image per frame.
/// Each image is aspect-fit and centered in a frame sized to the first image.
/// - Parameters:
///   - images: the frames, in playback order. Must be non-empty.
///   - videoPath: destination file path (must not already exist for
///     AVAssetWriter to succeed).
func generateVideo(images: [UIImage], videoPath: String) {
    // BUG FIX: the original force-unwrapped images.first?.size and crashed
    // on an empty array.
    guard let firstImage = images.first else {
        print("generateVideo: no images to encode")
        return
    }
    let outputSize = firstImage.size
    let videoUrl = URL(fileURLWithPath: videoPath)
    guard let videoWriter = try? AVAssetWriter(outputURL: videoUrl, fileType: AVFileTypeMPEG4) else {
        fatalError("AVAssetWriter error")
    }

    let outputSettings = [AVVideoCodecKey : AVVideoCodecH264, AVVideoWidthKey : NSNumber(value: Float(outputSize.width) as Float), AVVideoHeightKey : NSNumber(value: Float(outputSize.height) as Float)] as [String : Any]
    let videoWriterInput : AVAssetWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: outputSettings)

    let attributes = [kCVPixelBufferPixelFormatTypeKey as String : NSNumber(value: kCVPixelFormatType_32ARGB as UInt32)]
    let pixelBufferAdaptor : AVAssetWriterInputPixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor.init(assetWriterInput: videoWriterInput, sourcePixelBufferAttributes: attributes)

    if videoWriter.canAdd(videoWriterInput) {
        videoWriter.add(videoWriterInput)
    }

    if videoWriter.startWriting() {
        videoWriter.startSession(atSourceTime: kCMTimeZero)
        assert(pixelBufferAdaptor.pixelBufferPool != nil)

        let queue = DispatchQueue(label: "mediaInputQueue", attributes: [])
        let fps: Int32 = 80
        var frameCount: Int64 = 0

        videoWriterInput.requestMediaDataWhenReady(on: queue, using: {
            // BUG FIX: this closure is invoked repeatedly by AVFoundation.
            // The original iterated ALL images once, silently dropping any
            // frame for which isReadyForMoreMediaData was false, and called
            // markAsFinished unconditionally on the first invocation. The
            // documented pattern is: append while the input is ready, and
            // finish only once every frame has been appended.
            while videoWriterInput.isReadyForMoreMediaData && frameCount < Int64(images.count) {
                let image = images[Int(frameCount)]
                // BUG FIX: presentation time is simply frameCount / fps.
                // The original added an extra frame duration after frame 0,
                // leaving a gap between the first and second frames.
                let presentationTime = CMTimeMake(frameCount, fps)

                var pixelBuffer: CVPixelBuffer? = nil
                let status: CVReturn = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferAdaptor.pixelBufferPool!, &pixelBuffer)
                guard let buffer = pixelBuffer , status == kCVReturnSuccess else {
                    print("Failed to create pixel buffer")
                    // Abort: skip the remaining frames and finish below.
                    frameCount = Int64(images.count)
                    break
                }

                CVPixelBufferLockBaseAddress(buffer, CVPixelBufferLockFlags(rawValue: CVOptionFlags(0)))
                let data = CVPixelBufferGetBaseAddress(buffer)
                let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
                let context = CGContext(data: data, width: Int(outputSize.width), height: Int(outputSize.height), bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(buffer), space: rgbColorSpace, bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue)
                context?.clear(CGRect(x: 0, y: 0, width: outputSize.width, height: outputSize.height))

                // Aspect-fit the image into the output frame, centered.
                let horizontalRatio = outputSize.width / image.size.width
                let verticalRatio = outputSize.height / image.size.height
                let aspectRatio = min(horizontalRatio, verticalRatio)
                let newSize = CGSize(width: image.size.width * aspectRatio, height: image.size.height * aspectRatio)
                let x = (outputSize.width - newSize.width) / 2
                let y = (outputSize.height - newSize.height) / 2
                context?.draw(image.cgImage!, in: CGRect(x: x, y: y, width: newSize.width, height: newSize.height))

                CVPixelBufferUnlockBaseAddress(buffer, CVPixelBufferLockFlags(rawValue: CVOptionFlags(0)))
                if !pixelBufferAdaptor.append(buffer, withPresentationTime: presentationTime) {
                    print("pixel buffer append failed at frame \(frameCount)")
                }
                frameCount += 1
            }

            // All frames appended (or encoding aborted): close the file.
            if frameCount >= Int64(images.count) {
                videoWriterInput.markAsFinished()
                videoWriter.finishWriting(completionHandler: {
                    print("转换视频完成")
                })
            }
        })
    }
}

转载于:https://my.oschina.net/ilrrong/blog/752719

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值