点击上方蓝字关注我们
微信公众号:OpenCV学堂
关注获取更多计算机视觉与深度学习知识
前言
导入头文件
一行代码就获取C++ SDK支持
创建推理请求
ov::Core ie;
ov::CompiledModel compiled_model = ie.compile_model(settings.getWeight_file(), "CPU");
infer_request = compiled_model.create_infer_request();
ov::Core ie;
auto model = ie.read_model(settings.getWeight_file());
auto inputs = model->inputs();
// change the input as dynamic shape support
for(auto input_one : inputs){
auto input_shape = input_one.get_partial_shape();
input_shape[0] = 1;
input_shape[1] = 3;
input_shape[2] = -1;
input_shape[3] = -1;
}
ov::CompiledModel compiled_model = ie.compile_model(model, "CPU");
infer_request = compiled_model.create_infer_request();
ov::Core ie;
std::cout<<"model file: "<
std ::endl;< span="">auto model = ie.read_model(settings.getWeight_file());
std::cout<<"read model file finished!"<<std::endl;< span="">
// setting input data format and layout
ov::preprocess::PrePostProcessor ppp(model);
ov::preprocess::InputInfo& inputInfo0 = ppp.input(0);
inputInfo0.tensor().set_element_type(ov::element::u8);
inputInfo0.tensor().set_layout({ "NCHW" });
inputInfo0.model().set_layout("NCHW");
ov::preprocess::InputInfo& inputInfo1 = ppp.input(1);
inputInfo1.tensor().set_element_type(ov::element::u8);
inputInfo1.tensor().set_layout({ "NCHW" });
inputInfo1.model().set_layout("NCHW");
model = ppp.build();
ov::CompiledModel compiled_model = ie.compile_model(model, "CPU");
this->infer_request = compiled_model.create_infer_request();
导出IR格式模型
# Convert the ONNX model to OpenVINO IR, pinning the input to a static
# NCHW shape of 1x3x640x640.
ov_model = ov.convert_model("D:/python/my_yolov8_train_demo/yolov8n.onnx",
input=[[1, 3, 640, 640]])
# Serialize the converted model to IR on disk (.xml plus companion .bin).
ov.save_model(ov_model, str("D:/bird_test/back1/yolov8_ov.xml"))
图像预处理
ov::preprocess::PrePostProcessor ppp(model);
ov::preprocess::InputInfo& input = ppp.input(tensor_name);
// we only need to know where is C dimension
input.model().set_layout("...C");
// specify scale and mean values, order of operations is important
input.preprocess().mean(116.78f).scale({ 57.21f, 57.45f, 57.73f });
// insert preprocessing operations to the 'model'
model = ppp.build();
// Preprocessing: resize to the network input size, convert to float32,
// then normalize pixel values into [0, 1].
cv::Mat blob_image;
cv::resize(image, blob_image, cv::Size(input_w, input_h));
blob_image.convertTo(blob_image, CV_32F);
blob_image /= 255.0;
或者
// One-call alternative: scales by 1/255, resizes to 640x640, subtracts a
// zero mean, swaps BGR->RGB (swapRB=true) and does not crop (crop=false).
cv::Mat blob = cv::dnn::blobFromImage(image, 1 / 255.0, cv::Size(640, 640), cv::Scalar(0, 0, 0), true, false);
预测推理
// Synchronous inference: blocks the calling thread until results are ready.
this->infer_request.infer();
异步方式 + Callback
// Asynchronous inference with a completion callback: the callback restarts
// inference exactly once, then lets the pipeline finish. restart_once is
// captured by copy with 'mutable' so the flag persists across invocations,
// while everything else is captured by reference.
auto restart_once = true;
infer_request.set_callback([&, restart_once](std::exception_ptr exception_ptr) mutable {
if (exception_ptr) {
// Process the exception, or rethrow it as done here.
std::rethrow_exception(exception_ptr);
} else {
// Extract the inference result.
ov::Tensor output_tensor = infer_request.get_output_tensor();
// Restart inference once if needed.
if (restart_once) {
infer_request.start_async();
restart_once = false;
}
}
});
// Start inference without blocking the current thread.
infer_request.start_async();
// Poll inference status immediately (zero timeout).
bool status = infer_request.wait_for(std::chrono::milliseconds{0});
// Wait for up to one millisecond.
status = infer_request.wait_for(std::chrono::milliseconds{1});
// Block until inference completes.
infer_request.wait();
cv::Mat与ov::Tensor转换
// Wrap existing cv::Mat buffers as ov::Tensor without copying: the tensor
// constructor takes an external data pointer, so it aliases the Mat's
// memory -- the Mats must outlive the inference that consumes these tensors.
bgr.convertTo(bgr, CV_32FC3);
gray.convertTo(gray, CV_32F, 1.0/255);
ov::Tensor blob1(input_tensor_1.get_element_type(), input_tensor_1.get_shape(), (float *)bgr.data);
ov::Tensor blob2(input_tensor_2.get_element_type(), input_tensor_2.get_shape(), (float *)gray.data);
// Wrap the raw output tensor buffer in a cv::Mat header (no data copy);
// rows/cols come from dims [1] and [2] of the output shape.
const float* prob = (float*)output.data();
const ov::Shape outputDims = output.get_shape();
size_t numRows = outputDims[1];
size_t numCols = outputDims[2];
// Use this line when the output has 1 channel.
cv::Mat detOut(numRows, numCols, CV_32F, (float*)prob);
// Use this line when the output has 3 channels.
// NOTE(review): keep only ONE of the two detOut declarations in real code --
// together they redefine the same variable and will not compile.
cv::Mat detOut(numRows, numCols, CV_32FC3, (float*)prob);
如果输出是1xHxW的三维张量,直接用下面这样:
// For a 1xHxW three-dimensional output tensor, wrap it the same way:
cv::Mat detOut(numRows, numCols, CV_32F, (float*)prob);
从此你就真的解锁了OpenVINO C++ 模型推理部署的各种细节了。
推荐阅读
OpenCV4.8+YOLOv8对象检测C++推理演示
ZXING+OpenCV打造开源条码检测应用
攻略 | 学习深度学习只需要三个月的好方法
三行代码实现 TensorRT8.6 C++ 深度学习模型部署
实战 | YOLOv8+OpenCV 实现DM码定位检测与解析
对象检测边界框损失 – 从IOU到ProbIOU
初学者必看 | 学习深度学习的五个误区