[iOS][Swift] SnapshotRenderer: a custom WebRTC video renderer for taking snapshots of a remote (or local) stream
//
//  SnapshotRenderer.swift
//  Smart Home
//
//  Created by Oleksandr Vitruk on 20.11.2019.
//  Copyright © 2019 codemeister64. All rights reserved.
//

import Foundation
import UIKit
import CoreImage
import CoreVideo
import WebRTC
/* ------ Usage ------

 1. Add the SnapshotRenderer to a video track:

    self.remoteVideoTrack?.add(snapshotRenderer)

 2. Request a snapshot when needed:

    snapshotRenderer.requestSnapshot { snapshot in
        guard let image = snapshot else {
            // handle the nil case
            return
        }
        // process the image here
    }
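
 3. When snapshots are no longer needed, detach the renderer so the track
    stops delivering frames to it (RTCVideoTrack exposes remove(_:)
    alongside add(_:)):

    self.remoteVideoTrack?.remove(snapshotRenderer)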
*/
protocol SnapshotRendererType: NSObject, RTCVideoRenderer {
    var size: CGSize? { get }
    func requestSnapshot(completionHandler: @escaping (UIImage?) -> Void)
}

class SnapshotRenderer: NSObject, SnapshotRendererType {

    // MARK: - Public properties

    private(set) var size: CGSize?

    // MARK: - Private properties

    private let queue = DispatchQueue(label: "com.webRTC.renderer.snapshot",
                                      qos: .userInteractive)
    private var isRenderingFrame: Bool = false
    private var completionHandler: ((UIImage?) -> Void)?
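    // `isRenderingFrame` and `completionHandler` are touched from both the
    // caller's thread (requestSnapshot) and WebRTC's render thread
    // (renderFrame). For occasional snapshots this is typically fine, but
    // serializing access (e.g. with a lock) would make it strictly safe.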

    // MARK: - RTCVideoRenderer

    func setSize(_ size: CGSize) {
        print("SnapshotRenderer: setSize \(size)")
        self.size = size
    }

    func renderFrame(_ frame: RTCVideoFrame?) {
        guard isRenderingFrame == false,
              completionHandler != nil else {
            // no snapshot pending, so we don't need to process this frame
            return
        }
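        // newI420() converts the frame's buffer (which may be a native,
        // CVPixelBuffer-backed buffer) into an I420 representation whose
        // planes we can read directly.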
        guard let i420Frame = frame?.newI420() else {
            print("SnapshotRenderer: Unable to get i420 frame")
            // there is no frame to process
            completionHandler?(nil)
            completionHandler = nil
            return
        }
        // get the I420 buffer
        let buffer = i420Frame.buffer.toI420()
        // block processing of subsequent frames until this one is done
        isRenderingFrame = true
        // process the frame on a background queue so the render callback is
        // not blocked; try to convert the raw data to a UIImage
        queue.async { [weak self] in
            let image = self?.processFrame(buffer: buffer)
            // complete processing and unblock the renderer
            self?.completionHandler?(image)
            self?.completionHandler = nil
            self?.isRenderingFrame = false
        }
    }

    // MARK: - SnapshotRendererType

    func requestSnapshot(completionHandler: @escaping (UIImage?) -> Void) {
        guard isRenderingFrame == false else {
            // already rendering a frame
            completionHandler(nil)
            return
        }
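        // if a previous request is still waiting for a frame, its handler is
        // replaced below and will never be called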
        self.completionHandler = completionHandler
    }

    // MARK: - Processing

    private func processFrame(buffer: RTCI420BufferProtocol) -> UIImage? {
        let width = Int(buffer.width)
        let height = Int(buffer.height)
        print("SnapshotRenderer: size w = \(width), h = \(height)")
        // YUV(I420) -> YUV(NV12) -> CIImage -> UIImage
        // 1. YUV(I420) -> YUV(NV12)
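        // NV12 is used as the intermediate format because
        // CIImage(cvPixelBuffer:) can consume a bi-planar 4:2:0 CVPixelBuffer
        // directly, so the tri-planar I420 data is repacked into that layout.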
        guard let pixelBuffer = CVPixelBuffer.createPixelBuffer(width: width, height: height) else {
            print("SnapshotRenderer: Error could not create pixel buffer")
            return nil
        }
        let result = convertFrameFormat(pixelBuffer: pixelBuffer,
                                        width: width,
                                        height: height,
                                        dataY: buffer.dataY,
                                        dataU: buffer.dataU,
                                        dataV: buffer.dataV,
                                        strideY: Int(buffer.strideY),
                                        strideU: Int(buffer.strideU),
                                        strideV: Int(buffer.strideV))
        guard result else {
            print("SnapshotRenderer: Error could not convert pixel buffer data (i420 -> NV12)")
            return nil
        }
        // 2. YUV(NV12) -> CIImage -> UIImage
        let frame = CGRect(x: 0.0, y: 0.0, width: CGFloat(width), height: CGFloat(height))
        let image = pixelBuffer.toUIImage(frame: frame)
        return image
    }

    // swiftlint:disable function_parameter_count
    private func convertFrameFormat(pixelBuffer: CVPixelBuffer,
                                    width: Int,
                                    height: Int,
                                    dataY: UnsafePointer<UInt8>,
                                    dataU: UnsafePointer<UInt8>,
                                    dataV: UnsafePointer<UInt8>,
                                    strideY: Int,
                                    strideU: Int,
                                    strideV: Int) -> Bool {
        CVPixelBufferLockBaseAddress(pixelBuffer, CVPixelBufferLockFlags(rawValue: 0))
        guard let yDestPlane = CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0),
              let uvDestPlane = CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 1) else {
            print("SnapshotRenderer: Error could not get base address of plane for y or uv components")
            // unlock before bailing out so the buffer is not left locked
            CVPixelBufferUnlockBaseAddress(pixelBuffer, CVPixelBufferLockFlags(rawValue: 0))
            return false
        }
        let yByteBuffer = yDestPlane.assumingMemoryBound(to: UInt8.self)
        let uvByteBuffer = uvDestPlane.assumingMemoryBound(to: UInt8.self)
        // destination plane rows may be padded, so index with the planes' own strides
        let destStrideY = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 0)
        let destStrideUV = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 1)
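        // copy the Y (luma) plane row by row; source and destination strides
        // can differ from `width`, so offsets are computed per row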
        for i in 0..<height {
            for j in 0..<width {
                yByteBuffer[i * destStrideY + j] = dataY[i * strideY + j]
            }
        }
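        // copy the chroma planes: I420 stores U and V separately at quarter
        // resolution, while NV12 interleaves them (U0 V0 U1 V1 ...) in a
        // single half-height plane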
        for i in 0..<(height / 2) {
            for j in 0..<(width / 2) {
                uvByteBuffer[i * destStrideUV + 2 * j] = dataU[i * strideU + j]
                uvByteBuffer[i * destStrideUV + 2 * j + 1] = dataV[i * strideV + j]
            }
        }
        CVPixelBufferUnlockBaseAddress(pixelBuffer, CVPixelBufferLockFlags(rawValue: 0))
        return true
    }
}

private extension CVPixelBuffer {

    // https://github.com/hollance/CoreMLHelpers/blob/master/CoreMLHelpers/CVPixelBuffer%2BHelpers.swift

    /// Initialize a new pixel buffer.
    /// - Parameters:
    ///   - width: frame width
    ///   - height: frame height
    ///   - allocator: default kCFAllocatorDefault
    ///   - pixelFormat: default kCVPixelFormatType_420YpCbCr8BiPlanarFullRange.
    ///     Bi-planar Component Y'CbCr 8-bit 4:2:0, full range (luma=[0,255]
    ///     chroma=[1,255]). baseAddr points to a big-endian
    ///     CVPlanarPixelBufferInfo_YCbCrBiPlanar struct.
    ///   - pixelBufferAttributes: default nil
    static func createPixelBuffer(width: Int,
                                  height: Int,
                                  allocator: CFAllocator? = kCFAllocatorDefault,
                                  pixelFormat: OSType = kCVPixelFormatType_420YpCbCr8BiPlanarFullRange,
                                  pixelBufferAttributes: CFDictionary? = nil) -> CVPixelBuffer? {
        var pixelBuffer: CVPixelBuffer?
        let status = CVPixelBufferCreate(allocator,
                                         width,
                                         height,
                                         pixelFormat,
                                         pixelBufferAttributes,
                                         &pixelBuffer)
        if status != kCVReturnSuccess {
            print("SnapshotRenderer: Error could not create pixel buffer", status)
            return nil
        }
        return pixelBuffer
    }

    /// Convert the pixel buffer to a UIImage.
    /// - Parameters:
    ///   - frame: expected image frame
    ///   - scale: expected image scale
    ///   - orientation: expected image orientation
    func toUIImage(frame: CGRect,
                   scale: CGFloat = 1.0,
                   orientation: UIImage.Orientation = .up) -> UIImage? {
        let ciImage = CIImage(cvPixelBuffer: self)
        let context = CIContext()
        guard let cgImage = context.createCGImage(ciImage, from: frame) else {
            print("SnapshotRenderer: Error could not create CGImage from CIImage")
            return nil
        }
        let uiImage = UIImage(cgImage: cgImage, scale: scale, orientation: orientation)
        return uiImage
    }
}
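
/* ------ Integration sketch ------
 A minimal, hypothetical wiring example. `CallViewController`,
 `remoteVideoTrack`, and `display(_:)` are illustrative names, not part of
 this gist:

 final class CallViewController: UIViewController {

     private let snapshotRenderer = SnapshotRenderer()
     var remoteVideoTrack: RTCVideoTrack?

     override func viewDidLoad() {
         super.viewDidLoad()
         // start feeding frames to the renderer
         remoteVideoTrack?.add(snapshotRenderer)
     }

     func takeSnapshot() {
         snapshotRenderer.requestSnapshot { [weak self] image in
             guard let image = image else { return }
             // the handler runs on a background queue, so hop to main for UI work
             DispatchQueue.main.async {
                 self?.display(image)
             }
         }
     }

     private func display(_ image: UIImage) {
         // e.g. assign to an image view or save to the photo library
     }
 }
*/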