Cannot capture video data on Swift Playgrounds, captureOutput AVCaptureVideoDataOutputSampleBufferDelegate delegate method not called
I want to access the iPad's camera from the Swift Playgrounds app on iPad. It turns out that I cannot capture any video data, even though my playground otherwise runs fine.
The delegate method of the AVCaptureVideoDataOutputSampleBufferDelegate protocol,
captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!)
which is called in my iOS app, is never called in the playground (probably because no video data is coming in).
The view in my playground is supposed to show the FaceTime camera's feed. Why doesn't the camera output show up, even though Apple explicitly says it's allowed to do so? Also, the Playgrounds app asks for camera permission as soon as I open the playground, so capturing should be permitted in some way.
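To rule out a permissions problem, the authorization status can be checked directly; a minimal sketch, using the same Swift 3-era API level as the code below:
import AVFoundation
// Check whether the camera prompt was actually granted.
let status = AVCaptureDevice.authorizationStatus(forMediaType: AVMediaTypeVideo)
switch status {
case .authorized:
    print("camera access granted")
case .notDetermined:
    // The callback is asynchronous, so the playground must keep
    // running long enough for it to fire.
    AVCaptureDevice.requestAccess(forMediaType: AVMediaTypeVideo) { granted in
        print("camera access granted: \(granted)")
    }
default:
    print("camera access denied or restricted")
}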
import UIKit
import CoreImage
import AVFoundation
import ImageIO
import PlaygroundSupport

class Visage: NSObject, AVCaptureVideoDataOutputSampleBufferDelegate {

    var visageCameraView: UIView = UIView()
    fileprivate var faceDetector: CIDetector?
    fileprivate var videoDataOutput: AVCaptureVideoDataOutput?
    fileprivate var videoDataOutputQueue: DispatchQueue?
    fileprivate var cameraPreviewLayer: AVCaptureVideoPreviewLayer?
    fileprivate var captureSession: AVCaptureSession = AVCaptureSession()
    fileprivate let notificationCenter: NotificationCenter = NotificationCenter.default

    override init() {
        super.init()
        self.captureSetup(AVCaptureDevicePosition.front)
        var faceDetectorOptions: [String : AnyObject]?
        faceDetectorOptions = [CIDetectorAccuracy: CIDetectorAccuracyHigh as AnyObject]
        self.faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: faceDetectorOptions)
    }

    func beginFaceDetection() {
        self.captureSession.startRunning()
    }

    func endFaceDetection() {
        self.captureSession.stopRunning()
    }

    fileprivate func captureSetup(_ position: AVCaptureDevicePosition) {
        var captureError: NSError?
        var captureDevice: AVCaptureDevice!

        // Pick the camera at the requested position, falling back to the default device.
        for testedDevice in AVCaptureDevice.devices(withMediaType: AVMediaTypeVideo) {
            if (testedDevice as AnyObject).position == position {
                captureDevice = testedDevice as! AVCaptureDevice
            }
        }
        if captureDevice == nil {
            captureDevice = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
        }

        var deviceInput: AVCaptureDeviceInput?
        do {
            deviceInput = try AVCaptureDeviceInput(device: captureDevice)
        } catch let error as NSError {
            captureError = error
            deviceInput = nil
        }

        captureSession.sessionPreset = AVCaptureSessionPresetHigh

        if captureError == nil {
            if captureSession.canAddInput(deviceInput) {
                captureSession.addInput(deviceInput)
            }

            // Deliver BGRA frames to the delegate on a dedicated serial queue.
            self.videoDataOutput = AVCaptureVideoDataOutput()
            self.videoDataOutput!.videoSettings = [kCVPixelBufferPixelFormatTypeKey as AnyHashable: Int(kCVPixelFormatType_32BGRA)]
            self.videoDataOutput!.alwaysDiscardsLateVideoFrames = true
            self.videoDataOutputQueue = DispatchQueue(label: "VideoDataOutputQueue", attributes: [])
            self.videoDataOutput!.setSampleBufferDelegate(self, queue: self.videoDataOutputQueue!)

            if captureSession.canAddOutput(self.videoDataOutput) {
                captureSession.addOutput(self.videoDataOutput)
            }
        }

        visageCameraView.frame = UIScreen.main.bounds

        let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        previewLayer?.frame = UIScreen.main.bounds
        previewLayer?.videoGravity = AVLayerVideoGravityResizeAspectFill
        visageCameraView.layer.addSublayer(previewLayer!)
    }

    // NOT CALLED
    func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
        print("delegate method called!")
    }
}

class SmileView: UIView {

    let smileView = UIView()
    var smileRec: Visage!

    override init(frame: CGRect) {
        super.init(frame: frame)
        self.addSubview(smileView)
        self.translatesAutoresizingMaskIntoConstraints = false

        smileRec = Visage()
        smileRec.beginFaceDetection()
        let cameraView = smileRec.visageCameraView
        self.addSubview(cameraView)
    }

    required init?(coder aDecoder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }
}

let frame = CGRect(x: 0, y: 0, width: UIScreen.main.bounds.width, height: UIScreen.main.bounds.height)
let sView = SmileView(frame: frame)
PlaygroundPage.current.liveView = sView
I think you need to set the needsIndefiniteExecution property so that execution doesn't stop once your top-level code finishes. From Apple:
By default, all top-level code is executed, and then execution is
terminated. When working with asynchronous code, enable indefinite
execution to allow execution to continue after the end of the
playground’s top-level code is reached. This, in turn, gives threads
and callbacks time to execute.
Editing the playground automatically stops execution, even when
indefinite execution is enabled.
Set needsIndefiniteExecution to true to continue execution after the
end of top-level code. Set it to false to stop execution at that
point.
So the end of your code could look like this:
let frame = CGRect(x: 0, y: 0, width: UIScreen.main.bounds.width, height: UIScreen.main.bounds.height)
let sView = SmileView(frame: frame)
PlaygroundPage.current.needsIndefiniteExecution = true
PlaygroundPage.current.liveView = sView
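The same applies to any asynchronous callback in a playground; a minimal self-contained sketch of the pattern:
import PlaygroundSupport
import Foundation

PlaygroundPage.current.needsIndefiniteExecution = true
// Without the line above, the page would finish before the callback fires.
DispatchQueue.main.asyncAfter(deadline: .now() + 1) {
    print("async callback ran")
    PlaygroundPage.current.finishExecution()  // explicitly end the page once done
}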
Edit: This should be fixed now :)
--
Edit: Apple has confirmed that this is a bug.
I have filed a bug report and will update this answer when there is new official information.