Swift:视频以一种尺寸录制,但以错误的尺寸呈现
Swift: video records at one size but renders at wrong size
目标是在 Swift 的设备上捕获全屏视频。在下面的代码中,视频捕获似乎发生在全屏(同时录制相机预览使用全屏),但视频的呈现发生在不同的分辨率。特别是对于 5S,似乎捕获发生在 320x568
,但渲染发生在 320x480
。
如何捕获和渲染全屏视频?
视频捕获代码:
/// Configures the shared PBJVision singleton for portrait, back-camera video
/// recording and installs a tap gesture to start/pause recording.
private func initPBJVision() {
    // Store PBJVision in var for convenience
    let vision = PBJVision.sharedInstance()
    // Configure PBJVision
    vision.delegate = self
    vision.cameraMode = PBJCameraMode.Video
    vision.cameraOrientation = PBJCameraOrientation.Portrait
    vision.focusMode = PBJFocusMode.ContinuousAutoFocus
    vision.outputFormat = PBJOutputFormat.Preset
    vision.cameraDevice = PBJCameraDevice.Back
    // FIX: the captured frame size is determined by the AVCaptureSession's
    // sessionPreset (exposed by PBJVision as `captureSessionPreset`), NOT by
    // the preview layer's frame or videoGravity. Without setting it, the
    // default preset can produce 4:3 output (rendered as 320x480) even though
    // the full-screen preview looks 320x568 on a 5S.
    vision.captureSessionPreset = AVCaptureSessionPresetHigh
    // Let taps start/pause recording
    let tapHandler = UITapGestureRecognizer(target: self, action: "doTap:")
    view.addGestureRecognizer(tapHandler)
    // Log status
    print("Configured PBJVision")
}
/// Attaches PBJVision's camera preview layer to this view controller's view,
/// sized to fill the full screen bounds. (Body truncated in this view.)
private func startCameraPreview() {
// Store PBJVision in var for convenience
let vision = PBJVision.sharedInstance()
// Connect PBJVision camera preview to <videoView>
// -- Get preview width
let deviceWidth = CGRectGetWidth(view.frame)
let deviceHeight = CGRectGetHeight(view.frame)
// -- Configure PBJVision's preview layer
let previewLayer = vision.previewLayer
previewLayer.frame = CGRectMake(0, 0, deviceWidth, deviceHeight)
// NOTE(review): videoGravity only controls how the preview is scaled/cropped
// within this layer; it does not change the size of the captured output —
// that is governed by the capture session's preset (see initPBJVision).
previewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill
...
}
视频渲染代码:
/// Builds an AVMutableComposition (video + audio) from the recorded file at
/// `fileUrl` and prepares an AVMutableVideoComposition for export with a text
/// overlay. (Body truncated in this view — the export session is not shown.)
func exportVideo(fileUrl: NSURL) {
// Create main composition object
let videoAsset = AVURLAsset(URL: fileUrl, options: nil)
let mainComposition = AVMutableComposition()
// Empty mutable tracks that will receive the source asset's media.
let compositionVideoTrack = mainComposition.addMutableTrackWithMediaType(AVMediaTypeVideo, preferredTrackID: CMPersistentTrackID(kCMPersistentTrackID_Invalid))
let compositionAudioTrack = mainComposition.addMutableTrackWithMediaType(AVMediaTypeAudio, preferredTrackID: CMPersistentTrackID(kCMPersistentTrackID_Invalid))
// -- Extract and apply video & audio tracks to composition
// NOTE(review): assumes the asset has at least one video and one audio
// track; `[0]` will crash on an asset missing either — consider guarding.
let sourceVideoTrack = videoAsset.tracksWithMediaType(AVMediaTypeVideo)[0]
let sourceAudioTrack = videoAsset.tracksWithMediaType(AVMediaTypeAudio)[0]
do {
try compositionVideoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), ofTrack: sourceVideoTrack, atTime: kCMTimeZero)
} catch {
print("Error with insertTimeRange. Video error: \(error).")
}
do {
try compositionAudioTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), ofTrack: sourceAudioTrack, atTime: kCMTimeZero)
} catch {
print("Error with insertTimeRange. Audio error: \(error).")
}
// Add text to video
// -- Create video composition object
// NOTE(review): naturalSize here presumably reflects the dimensions the
// video was actually RECORDED at (set by the capture session preset), not
// the screen size. If this logs 320x480 the recording itself was 4:3, so
// the fix belongs in the capture configuration, not in this method.
let renderSize = compositionVideoTrack.naturalSize
let videoComposition = AVMutableVideoComposition()
videoComposition.renderSize = renderSize
videoComposition.frameDuration = CMTimeMake(Int64(1), Int32(videoFrameRate))
// -- Add instruction to video composition object
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(kCMTimeZero, videoAsset.duration)
let videoLayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: compositionVideoTrack)
instruction.layerInstructions = [videoLayerInstruction]
videoComposition.instructions = [instruction]
// -- Define video frame
let videoFrame = CGRectMake(0, 0, renderSize.width, renderSize.height)
print("Video Frame: \(videoFrame)") // <-- Prints frame of 320x480 so render size already wrong here
...
如果我没猜错,您似乎误解了一点:设备屏幕尺寸并不等于相机预览(和捕获)的尺寸。previewLayer 的 videoGravity 属性只决定预览内容如何在图层内拉伸或适配(stretch/fit),它并不影响捕获输出的尺寸。
输出的实际帧大小取决于当前 AVCaptureSession
的 sessionPreset 属性。正如我通过阅读 PBJVision lib 的 GitHub 存储库所理解的那样,它的单例为此具有 setter(称为 captureSessionPreset
)。您可以在 initPBJVision
方法中更改它。
在 Apple 的官方文档中可以找到会话预设(session preset)的可能取值。
此外,您还可以在视频输出设置中指定 AVVideoWidthKey 和 AVVideoHeightKey 来控制导出尺寸。您也可以通过 AVVideoProfileLevelKey 指定编码配置,例如 AVVideoProfileLevelH264HighAutoLevel 以支持 4K——这是一位朋友 (Michael Lowin) 找到的属性,有助于提升导出质量。
目标是在 Swift 的设备上捕获全屏视频。在下面的代码中,视频捕获似乎发生在全屏(同时录制相机预览使用全屏),但视频的呈现发生在不同的分辨率。特别是对于 5S,似乎捕获发生在 320x568
,但渲染发生在 320x480
。
如何捕获和渲染全屏视频?
视频捕获代码:
/// Configures the shared PBJVision singleton for portrait, back-camera video
/// recording and installs a tap gesture to start/pause recording.
private func initPBJVision() {
    // Store PBJVision in var for convenience
    let vision = PBJVision.sharedInstance()
    // Configure PBJVision
    vision.delegate = self
    vision.cameraMode = PBJCameraMode.Video
    vision.cameraOrientation = PBJCameraOrientation.Portrait
    vision.focusMode = PBJFocusMode.ContinuousAutoFocus
    vision.outputFormat = PBJOutputFormat.Preset
    vision.cameraDevice = PBJCameraDevice.Back
    // FIX: the captured frame size is determined by the AVCaptureSession's
    // sessionPreset (exposed by PBJVision as `captureSessionPreset`), NOT by
    // the preview layer's frame or videoGravity. Without setting it, the
    // default preset can produce 4:3 output (rendered as 320x480) even though
    // the full-screen preview looks 320x568 on a 5S.
    vision.captureSessionPreset = AVCaptureSessionPresetHigh
    // Let taps start/pause recording
    let tapHandler = UITapGestureRecognizer(target: self, action: "doTap:")
    view.addGestureRecognizer(tapHandler)
    // Log status
    print("Configured PBJVision")
}
/// Attaches PBJVision's camera preview layer to this view controller's view,
/// sized to fill the full screen bounds. (Body truncated in this view.)
private func startCameraPreview() {
// Store PBJVision in var for convenience
let vision = PBJVision.sharedInstance()
// Connect PBJVision camera preview to <videoView>
// -- Get preview width
let deviceWidth = CGRectGetWidth(view.frame)
let deviceHeight = CGRectGetHeight(view.frame)
// -- Configure PBJVision's preview layer
let previewLayer = vision.previewLayer
previewLayer.frame = CGRectMake(0, 0, deviceWidth, deviceHeight)
// NOTE(review): videoGravity only controls how the preview is scaled/cropped
// within this layer; it does not change the size of the captured output —
// that is governed by the capture session's preset (see initPBJVision).
previewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill
...
}
视频渲染代码:
/// Builds an AVMutableComposition (video + audio) from the recorded file at
/// `fileUrl` and prepares an AVMutableVideoComposition for export with a text
/// overlay. (Body truncated in this view — the export session is not shown.)
func exportVideo(fileUrl: NSURL) {
// Create main composition object
let videoAsset = AVURLAsset(URL: fileUrl, options: nil)
let mainComposition = AVMutableComposition()
// Empty mutable tracks that will receive the source asset's media.
let compositionVideoTrack = mainComposition.addMutableTrackWithMediaType(AVMediaTypeVideo, preferredTrackID: CMPersistentTrackID(kCMPersistentTrackID_Invalid))
let compositionAudioTrack = mainComposition.addMutableTrackWithMediaType(AVMediaTypeAudio, preferredTrackID: CMPersistentTrackID(kCMPersistentTrackID_Invalid))
// -- Extract and apply video & audio tracks to composition
// NOTE(review): assumes the asset has at least one video and one audio
// track; `[0]` will crash on an asset missing either — consider guarding.
let sourceVideoTrack = videoAsset.tracksWithMediaType(AVMediaTypeVideo)[0]
let sourceAudioTrack = videoAsset.tracksWithMediaType(AVMediaTypeAudio)[0]
do {
try compositionVideoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), ofTrack: sourceVideoTrack, atTime: kCMTimeZero)
} catch {
print("Error with insertTimeRange. Video error: \(error).")
}
do {
try compositionAudioTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), ofTrack: sourceAudioTrack, atTime: kCMTimeZero)
} catch {
print("Error with insertTimeRange. Audio error: \(error).")
}
// Add text to video
// -- Create video composition object
// NOTE(review): naturalSize here presumably reflects the dimensions the
// video was actually RECORDED at (set by the capture session preset), not
// the screen size. If this logs 320x480 the recording itself was 4:3, so
// the fix belongs in the capture configuration, not in this method.
let renderSize = compositionVideoTrack.naturalSize
let videoComposition = AVMutableVideoComposition()
videoComposition.renderSize = renderSize
videoComposition.frameDuration = CMTimeMake(Int64(1), Int32(videoFrameRate))
// -- Add instruction to video composition object
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(kCMTimeZero, videoAsset.duration)
let videoLayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: compositionVideoTrack)
instruction.layerInstructions = [videoLayerInstruction]
videoComposition.instructions = [instruction]
// -- Define video frame
let videoFrame = CGRectMake(0, 0, renderSize.width, renderSize.height)
print("Video Frame: \(videoFrame)") // <-- Prints frame of 320x480 so render size already wrong here
...
如果我没猜错,您似乎误解了一点:设备屏幕尺寸并不等于相机预览(和捕获)的尺寸。previewLayer 的 videoGravity 属性只决定预览内容如何在图层内拉伸或适配(stretch/fit),它并不影响捕获输出的尺寸。
输出的实际帧大小取决于当前 AVCaptureSession
的 sessionPreset 属性。正如我通过阅读 PBJVision lib 的 GitHub 存储库所理解的那样,它的单例为此具有 setter(称为 captureSessionPreset
)。您可以在 initPBJVision
方法中更改它。
在 Apple 的官方文档中可以找到会话预设(session preset)的可能取值。
此外,您还可以在视频输出设置中指定 AVVideoWidthKey 和 AVVideoHeightKey 来控制导出尺寸。您也可以通过 AVVideoProfileLevelKey 指定编码配置,例如 AVVideoProfileLevelH264HighAutoLevel 以支持 4K——这是一位朋友 (Michael Lowin) 找到的属性,有助于提升导出质量。