Why is Yp Cb Cr image buffer all shuffled in iOS 13?
I have a computer vision application that acquires grayscale images from a sensor and processes them. The image acquisition on iOS is written in Obj-C, while the image processing is done in C++ using OpenCV. Since I only need the luminance data, I acquire the images in the YUV (or Yp Cb Cr) 4:2:0 bi-planar full-range format and assign the buffer's data to an OpenCV Mat object (see the acquisition code below). This worked fine until the brand-new iOS 13 came out... For some reason, on iOS 13 the images I get are misaligned, resulting in diagonal stripes. Looking at the images I get, I suspect this is the consequence of a change in the ordering of the buffer's Y, Cb, and Cr components, or of a change in the buffer's stride. Does anyone know whether iOS 13 introduced this kind of change, and how I can update my code to avoid it, preferably in a backward-compatible way?
Here is my image acquisition code:
// capture config
- (void)initialize {
    AVCaptureDevice *frontCameraDevice;
    NSArray *devices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo];
    for (AVCaptureDevice *device in devices) {
        if (device.position == AVCaptureDevicePositionFront) {
            frontCameraDevice = device;
        }
    }
    if (frontCameraDevice == nil) {
        NSLog(@"Front camera device not found");
        return;
    }
    _session = [[AVCaptureSession alloc] init];
    _session.sessionPreset = AVCaptureSessionPreset640x480;
    NSError *error = nil;
    AVCaptureDeviceInput *input = [AVCaptureDeviceInput deviceInputWithDevice:frontCameraDevice error:&error];
    if (error != nil) {
        NSLog(@"Error getting front camera device input: %@", error);
    }
    if ([_session canAddInput:input]) {
        [_session addInput:input];
    } else {
        NSLog(@"Could not add front camera device input to session");
    }
    AVCaptureVideoDataOutput *videoOutput = [[AVCaptureVideoDataOutput alloc] init];
    // This is the default, but making it explicit
    videoOutput.alwaysDiscardsLateVideoFrames = YES;
    if ([videoOutput.availableVideoCVPixelFormatTypes containsObject:
            [NSNumber numberWithInt:kCVPixelFormatType_420YpCbCr8BiPlanarFullRange]]) {
        OSType format = kCVPixelFormatType_420YpCbCr8BiPlanarFullRange;
        videoOutput.videoSettings = [NSDictionary dictionaryWithObject:[NSNumber numberWithUnsignedInt:format]
                                                                 forKey:(id)kCVPixelBufferPixelFormatTypeKey];
    } else {
        NSLog(@"YUV format not available");
    }
    [videoOutput setSampleBufferDelegate:self queue:dispatch_queue_create("extrapage.camera.capture.sample.buffer.delegate", DISPATCH_QUEUE_SERIAL)];
    if ([_session canAddOutput:videoOutput]) {
        [_session addOutput:videoOutput];
    } else {
        NSLog(@"Could not add video output to session");
    }
    AVCaptureConnection *captureConnection = [videoOutput connectionWithMediaType:AVMediaTypeVideo];
    captureConnection.videoOrientation = AVCaptureVideoOrientationPortrait;
}
// acquisition code
- (void)captureOutput:(AVCaptureOutput *)output didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection {
    if (_listener != nil) {
        CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
        OSType format = CVPixelBufferGetPixelFormatType(pixelBuffer);
        NSAssert(format == kCVPixelFormatType_420YpCbCr8BiPlanarFullRange, @"Only YUV is supported");
        // The first plane / channel (at index 0) is the grayscale plane
        // See more information about the YUV format
        // http://en.wikipedia.org/wiki/YUV
        CVPixelBufferLockBaseAddress(pixelBuffer, 0);
        void *baseaddress = CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0);
        CGFloat width = CVPixelBufferGetWidth(pixelBuffer);
        CGFloat height = CVPixelBufferGetHeight(pixelBuffer);
        cv::Mat frame(height, width, CV_8UC1, baseaddress, 0);
        [_listener onNewFrame:frame];
        CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
    }
}
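A quick way to test the stride suspicion (this check is not in my original code; it reuses pixelBuffer from the delegate above) is to log the Y plane's bytes-per-row next to its width:

// Diagnostic sketch: if bytesPerRow differs from the width of plane 0,
// the rows are padded and a Mat constructed with step 0 will show
// exactly this kind of diagonal striping.
size_t widthY  = CVPixelBufferGetWidthOfPlane(pixelBuffer, 0);
size_t strideY = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 0);
NSLog(@"Y plane: width = %zu, bytesPerRow = %zu", widthY, strideY);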
This is not an answer, but we had a similar problem. I tried different resolutions for photo capture, and only one resolution (2592 x 1936) did not work; all the others did. So, for example, changing the resolution to 1440x1920 might be a workaround for your problem.
I found the solution to this problem. It was a row-stride issue: apparently, in iOS 13 the row stride of Yp Cb Cr 4:2:0 8-bit bi-planar buffers was changed, maybe so that it is always a power of two. Therefore, in some cases the row stride is no longer the same as the width; that was the case for me. The fix is simple: get the row stride from the plane's info and pass it to the constructor of the OpenCV Mat, as shown below.
void *baseaddress = CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0);
size_t width = CVPixelBufferGetWidthOfPlane(pixelBuffer, 0);
size_t height = CVPixelBufferGetHeightOfPlane(pixelBuffer, 0);
// On iOS 13 the stride (bytes per row) can be larger than the plane's width
size_t bytesPerRow = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 0);
// Passing the stride as the step lets OpenCV skip the padding at the end of each row
cv::Mat frame(height, width, CV_8UC1, baseaddress, bytesPerRow);
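For context (my note, not part of the original answer): the fifth parameter of this cv::Mat constructor is the row step in bytes. Passing 0 means cv::Mat::AUTO_STEP, i.e. OpenCV assumes the rows are contiguous (step = width * element size), which is exactly the assumption that iOS 13's padded rows break.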
Note that I also changed the way I get the width and the height, using the plane's dimensions instead of the buffer's. For the Y plane they should always be the same; I am not sure whether this makes a difference.
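If the Mat has to outlive the locked pixel buffer, or downstream code assumes contiguous rows, here is a minimal sketch of a deep copy (my addition, not from the original answer; it reuses the names from the snippet above):

// Wrap the padded Y plane without copying, then clone it: cv::Mat::clone()
// copies row by row, drops the padding, and yields a contiguous buffer that
// stays valid after CVPixelBufferUnlockBaseAddress.
cv::Mat padded((int)height, (int)width, CV_8UC1, baseaddress, bytesPerRow);
cv::Mat contiguous = padded.clone();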
One more note: after the Xcode update that added support for the iOS 13 SDK, I had to uninstall my app from the test device, otherwise Xcode kept running the old version instead of the newly compiled one.