How can I return a Mat from opencv.js to C++?
I am writing a web app for the browser (a proof of concept for wasm) and want to return the Mat captured by the webcam to the C++ side so that I can process the image and display it (the UI is written in Qt5).
How can I return the Mat? The solutions I have found are:
- Iterate over the pixels via img.ucharPtr, copy the pixel values into a string, and return that to C++. But iterating over pixels in js is slow, so this is not an ideal solution.
- Encode the Mat to jpg with imencode and return it. The problem is that when I call imencode, it gives me the error message "cv.imencode is not a function". I downloaded opencv.js from this link, it is the official site.
function captureFrame()
{
    console.log("capture frame start");
    console.log("cols = ", global_frame.cols, ", rows = ", global_frame.rows, ", type = ", global_frame.type(), ", steps = ", global_frame.step[0]);
    global_cap.read(global_frame); // Read a frame from the camera
    console.log("convert from rgba 2 rgb");
    cv.cvtColor(global_frame, global_rgb_frame, cv.COLOR_RGBA2RGB);
    console.log("convert to byte64 string");
    var base64_frame = cv.imencode(".jpg", global_rgb_frame).toString('base64'); // fails: cv.imencode is not a function
    var length_bytes = lengthBytesUTF8(base64_frame);
    var string_on_wasm_heap = _malloc(length_bytes + 1); // +1 for the null terminator
    stringToUTF8(base64_frame, string_on_wasm_heap, length_bytes + 1);
    return string_on_wasm_heap;
}
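For completeness, if imencode were available, the C++ side would still need to read the returned heap pointer back into a Mat. The following is only a sketch of how that consumer might look, assuming Qt and an EM_JS wrapper named capture_frame_base64; both that wrapper and readEncodedFrame are made-up names for illustration, not part of the code above.
#include <emscripten.h>
#include <opencv2/imgcodecs.hpp>
#include <QByteArray>
#include <cstdlib>
#include <vector>

// Hypothetical wrapper around the js captureFrame() above.
EM_JS(char*, capture_frame_base64, (), {
    return captureFrame();
})

cv::Mat readEncodedFrame()
{
    char *base64_ptr = capture_frame_base64();
    // Decode the base64 jpg back into a Mat on the C++ side.
    QByteArray jpg = QByteArray::fromBase64(QByteArray(base64_ptr));
    std::vector<uchar> bytes(jpg.begin(), jpg.end());
    cv::Mat frame = cv::imdecode(bytes, cv::IMREAD_COLOR);
    free(base64_ptr); // was allocated with _malloc on the js side, same heap
    return frame;
}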
It would be perfect if I could open the webcam and access the frames directly from C++, but I have not found a way to do that yet.
If I understand your requirement correctly, you can use the code below, which I found in an OpenCV thread. Just grab the latest frame of the video capture and return it as a Mat object.
#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
    VideoCapture cap("video1.mp4");
    if (!cap.isOpened())
    {
        cout << "Cannot open the video file" << endl;
        return -1;
    }
    double count = cap.get(CAP_PROP_FRAME_COUNT); // get the frame count
    cap.set(CAP_PROP_POS_FRAMES, count - 1);      // set index to the last frame
    namedWindow("Screen Cap", WINDOW_AUTOSIZE);
    while (true)
    {
        Mat frame;
        bool success = cap.read(frame);
        if (!success)
        {
            cout << "Cannot read frame" << endl;
            break;
        }
        imshow("Screen Cap", frame);
        if (waitKey(0) == 27) break; // exit on ESC
    }
    return 0;
}
If you want to do some deeper analysis, I found these docs here.
I found a solution, and it is quite simple.
function captureFrame()
{
    console.log("capture frame start");
    console.log("cols = ", global_frame.cols, ", rows = ", global_frame.rows, ", type = ", global_frame.type(), ", steps = ", global_frame.step[0]);
    global_cap.read(global_frame); // Read a frame from the camera
    console.log("convert from rgba 2 rgb");
    cv.cvtColor(global_frame, global_rgb_frame, cv.COLOR_RGBA2RGB);
    // Copy the raw RGB pixels into the preallocated buffer on the wasm heap
    HEAPU8.set(global_rgb_frame.data, global_buffer);
    console.log("buffer values = ", HEAPU8[global_buffer], ", ", HEAPU8[global_buffer + 1], ", ", HEAPU8[global_buffer + 2]);
    console.log("global_rgb_frame.data values = ", global_rgb_frame.data[0], ", ", global_rgb_frame.data[1], ", ", global_rgb_frame.data[2]);
    return global_buffer;
}
global_buffer is a buffer on the wasm heap that I initialize when I start the camera; to reduce the need for repeated memory allocation I keep it as a global variable. This ugly trick is quite common whenever I need to communicate between different languages. Since _malloc returns an offset into the same WebAssembly linear memory that the C++ code sees, copying the pixels into it with HEAPU8.set makes them directly readable from C++. The camera initialization looks like this:
try {
    // Get permission from the user to use the camera.
    navigator.mediaDevices.getUserMedia(constraints)
        .then(function(stream) {
            global_camera.srcObject = stream;
            let {width, height} = stream.getTracks()[0].getSettings();
            global_height = height;
            global_width = width;
            console.log("width x height of stream = ", width, ",", height);
            global_frame = new cv.Mat(height, width, cv.CV_8UC4);
            global_rgb_frame = new cv.Mat(height, width, cv.CV_8UC3);
            global_camera.setAttribute("width", width);
            global_camera.setAttribute("height", height);
            // Preallocate the shared buffer on the wasm heap (3 bytes per RGB pixel)
            global_buffer = _malloc(global_rgb_frame.rows * global_rgb_frame.cols * 3);
            global_camera.onloadedmetadata = function(e) {
                global_camera.play();
                //! [Open a camera stream]
                global_cap = new cv.VideoCapture(global_camera);
            };
        });
} catch(err) {
    console.log("err:", err, ", err messages:", err.message);
}
After that, register the function in your cpp file.
EM_JS(unsigned char*, capture_frame, (), {
return captureFrame();
})
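The get_frame_width() and get_frame_height() calls used in the snippet below are not shown above; a minimal sketch of how they could be declared, assuming they simply read the js globals global_width and global_height set during camera initialization:
#include <emscripten.h>

// Assumed accessors for the js globals set in the getUserMedia callback.
EM_JS(int, get_frame_width, (), {
    return global_width;
})

EM_JS(int, get_frame_height, (), {
    return global_height;
})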
Convert the buffer to a QImage and draw it on a QLabel:
auto *buffer = capture_frame();
auto const width = get_frame_width(); //access js global variable
auto const height = get_frame_height(); //access js global variable
qDebug()<<__func__<<"width x height = "<<width<<", "<<height;
qDebug()<<__func__<<"buffer values = "<<buffer[0]<<", "<<buffer[1]<<", "<<buffer[2];
cv::Mat frame(height, width, CV_8UC3, buffer);
QImage img(frame.data, frame.cols, frame.rows,
static_cast<int>(frame.step[0]), QImage::Format_RGB888);
ui->labelImage->setPixmap(QPixmap::fromImage(img.copy()));
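To keep the label updating with new frames, one option (not part of the original code, just a sketch assuming the snippet above lives in a slot such as MainWindow::updateFrame()) is to drive it from a QTimer:
#include <QTimer>

// Pull a frame from js and repaint the label roughly 30 times per second.
auto *timer = new QTimer(this);
connect(timer, &QTimer::timeout, this, &MainWindow::updateFrame);
timer->start(33);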