libavfilter/dnn_backend_openvino: Add automatic input/output detection

Now, when using the openvino backend, the user doesn't need to set
input/output names on the command line; the model's ports are detected
automatically.

For example:
ffmpeg -i input.png -vf \
dnn_detect=dnn_backend=openvino:model=model.xml:input=image:\
output=detection_out -y output.png

can be simplified to:
ffmpeg -i input.png -vf dnn_detect=dnn_backend=openvino:model=model.xml\
 -y output.png

Signed-off-by: Wenbin Chen <wenbin.chen@intel.com>
Reviewed-by: Guo Yejun <yejun.guo@intel.com>
Wenbin Chen 2024-01-17 15:21:49 +08:00 committed by Guo Yejun
parent 0c517fcbe8
commit c695de56b5
2 changed files with 67 additions and 18 deletions
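
For context (this sketch is not part of the patch): the name-free usage above works because the OpenVINO 2.0 C API can hand back a model's ports directly, without a user-supplied tensor name. Below is a minimal standalone illustration, assuming a single-input model; "model.xml" is a placeholder path, error handling is reduced to early returns, and ov_core_create()/ov_core_read_model() are the only calls used here that do not also appear in the diff below.

#include <stdio.h>
#include <openvino/c/openvino.h>

int main(void)
{
    ov_core_t *core = NULL;
    ov_model_t *model = NULL;
    ov_output_const_port_t *input_port = NULL;
    char *port_name = NULL;

    if (ov_core_create(&core) != OK)
        return 1;
    if (ov_core_read_model(core, "model.xml", NULL, &model) != OK)
        return 1;

    /* No "input=" option needed: ask the model for its input port directly,
     * then query the port for whatever name the model gives it. */
    if (ov_model_const_input(model, &input_port) == OK &&
        ov_port_get_any_name(input_port, &port_name) == OK) {
        printf("detected input: %s\n", port_name);
        ov_free(port_name);
    }

    if (input_port)
        ov_output_const_port_free(input_port);
    ov_model_free(model);
    ov_core_free(core);
    return 0;
}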


@@ -205,6 +205,7 @@ static int fill_model_input_ov(OVModel *ov_model, OVRequestItem *request)
     ov_tensor_t* tensor = NULL;
     ov_shape_t input_shape = {0};
     ov_element_type_e precision;
+    char *port_name;
 #else
     dimensions_t dims;
     precision_e precision;
@@ -223,11 +224,23 @@ static int fill_model_input_ov(OVModel *ov_model, OVRequestItem *request)
         ov_output_const_port_free(ov_model->input_port);
         ov_model->input_port = NULL;
     }
-    status = ov_model_const_input_by_name(ov_model->ov_model, task->input_name, &ov_model->input_port);
+    if (task->input_name)
+        status = ov_model_const_input_by_name(ov_model->ov_model, task->input_name, &ov_model->input_port);
+    else
+        status = ov_model_const_input(ov_model->ov_model, &ov_model->input_port);
     if (status != OK) {
         av_log(ctx, AV_LOG_ERROR, "Failed to get input port shape.\n");
         return ov2_map_error(status, NULL);
     }
+    status = ov_port_get_any_name(ov_model->input_port, &port_name);
+    if (status != OK) {
+        av_log(ctx, AV_LOG_ERROR, "Failed to get input port name.\n");
+        return ov2_map_error(status, NULL);
+    }
+    av_log(ctx, AV_LOG_VERBOSE, "OpenVINO model input: %s\n", port_name);
+    ov_free(port_name);
+    port_name = NULL;
     status = ov_const_port_get_shape(ov_model->input_port, &input_shape);
     if (status != OK) {
         av_log(ctx, AV_LOG_ERROR, "Failed to get input port shape.\n");
@@ -620,7 +633,10 @@ static int init_model_ov(OVModel *ov_model, const char *input_name, const char *
         goto err;
     }
-    status = ov_preprocess_prepostprocessor_get_input_info_by_name(ov_model->preprocess, input_name, &ov_model->input_info);
+    if (input_name)
+        status = ov_preprocess_prepostprocessor_get_input_info_by_name(ov_model->preprocess, input_name, &ov_model->input_info);
+    else
+        status = ov_preprocess_prepostprocessor_get_input_info(ov_model->preprocess, &ov_model->input_info);
     if (status != OK) {
         av_log(ctx, AV_LOG_ERROR, "Failed to get input info from preprocess.\n");
         ret = ov2_map_error(status, NULL);
@@ -673,10 +689,24 @@ static int init_model_ov(OVModel *ov_model, const char *input_name, const char *
         goto err;
     }
+    if (!nb_outputs) {
+        size_t output_size;
+        status = ov_model_outputs_size(ov_model->ov_model, &output_size);
+        if (status != OK) {
+            av_log(ctx, AV_LOG_ERROR, "Failed to get output size.\n");
+            ret = ov2_map_error(status, NULL);
+            goto err;
+        }
+        nb_outputs = output_size;
+    }
     ov_model->nb_outputs = nb_outputs;
     for (int i = 0; i < nb_outputs; i++) {
-        status = ov_preprocess_prepostprocessor_get_output_info_by_name(
-            ov_model->preprocess, output_names[i], &ov_model->output_info);
+        if (output_names)
+            status = ov_preprocess_prepostprocessor_get_output_info_by_name(
+                ov_model->preprocess, output_names[i], &ov_model->output_info);
+        else
+            status = ov_preprocess_prepostprocessor_get_output_info_by_index(
+                ov_model->preprocess, i, &ov_model->output_info);
         if (status != OK) {
             av_log(ctx, AV_LOG_ERROR, "Failed to get output info from preprocess.\n");
             ret = ov2_map_error(status, NULL);
@@ -758,12 +788,25 @@ static int init_model_ov(OVModel *ov_model, const char *input_name, const char *
     }
     for (int i = 0; i < nb_outputs; i++) {
-        status = ov_model_const_output_by_name(ov_model->ov_model, output_names[i],
-                                               &ov_model->output_ports[i]);
+        char *port_name;
+        if (output_names)
+            status = ov_model_const_output_by_name(ov_model->ov_model, output_names[i],
+                                                   &ov_model->output_ports[i]);
+        else
+            status = ov_model_const_output_by_index(ov_model->ov_model, i,
+                                                    &ov_model->output_ports[i]);
         if (status != OK) {
             av_log(ctx, AV_LOG_ERROR, "Failed to get output port %s.\n", output_names[i]);
             goto err;
         }
+        status = ov_port_get_any_name(ov_model->output_ports[i], &port_name);
+        if (status != OK) {
+            av_log(ctx, AV_LOG_ERROR, "Failed to get output port name.\n");
+            goto err;
+        }
+        av_log(ctx, AV_LOG_VERBOSE, "OpenVINO model outputs: %s\n", port_name);
+        ov_free(port_name);
+        port_name = NULL;
     }
     //compile network
     status = ov_core_compile_model(ov_model->core, ov_model->ov_model, device, 0, &ov_model->compiled_model);
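
The two init_model_ov() hunks above count the outputs with ov_model_outputs_size() and then walk them by index instead of by name. Here is a standalone sketch of that enumeration pattern (not part of the patch): the ov_model_t* is assumed to come from ov_core_read_model(), and ports that fail to resolve are simply skipped.

#include <stdio.h>
#include <openvino/c/openvino.h>

static void list_model_outputs(const ov_model_t *model)
{
    size_t nb_outputs = 0;

    if (ov_model_outputs_size(model, &nb_outputs) != OK)
        return;

    for (size_t i = 0; i < nb_outputs; i++) {
        ov_output_const_port_t *port = NULL;
        char *port_name = NULL;

        /* No "output=" names: fetch each output port by its index. */
        if (ov_model_const_output_by_index(model, i, &port) != OK)
            continue;
        if (ov_port_get_any_name(port, &port_name) == OK) {
            printf("output %zu: %s\n", i, port_name);
            ov_free(port_name);
        }
        ov_output_const_port_free(port);
    }
}
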
@@ -1044,7 +1087,10 @@ static int get_input_ov(void *model, DNNData *input, const char *input_name)
     ov_element_type_e precision;
     int64_t* dims;
     ov_status_e status;
-    status = ov_model_const_input_by_name(ov_model->ov_model, input_name, &ov_model->input_port);
+    if (input_name)
+        status = ov_model_const_input_by_name(ov_model->ov_model, input_name, &ov_model->input_port);
+    else
+        status = ov_model_const_input(ov_model->ov_model, &ov_model->input_port);
     if (status != OK) {
         av_log(ctx, AV_LOG_ERROR, "Failed to get input port shape.\n");
         return ov2_map_error(status, NULL);
@@ -1241,7 +1287,7 @@ static int get_output_ov(void *model, const char *input_name, int input_width, i
     OVRequestItem *request;
     DNNExecBaseParams exec_params = {
         .input_name = input_name,
-        .output_names = &output_name,
+        .output_names = output_name ? &output_name : NULL,
         .nb_output = 1,
         .in_frame = NULL,
         .out_frame = NULL,
@@ -1297,7 +1343,7 @@ static int get_output_ov(void *model, const char *input_name, int input_width, i
     }
     if (!ov_model->exe_network) {
 #endif
-        ret = init_model_ov(ov_model, input_name, &output_name, 1);
+        ret = init_model_ov(ov_model, input_name, output_name ? &output_name : NULL, 1);
         if (ret != 0) {
             av_log(ctx, AV_LOG_ERROR, "Failed init OpenVINO exectuable network or inference request\n");
             return ret;


@@ -57,15 +57,17 @@ int ff_dnn_init(DnnContext *ctx, DNNFunctionType func_type, AVFilterContext *fil
         av_log(filter_ctx, AV_LOG_ERROR, "model file for network is not specified\n");
         return AVERROR(EINVAL);
     }
-    if (!ctx->model_inputname) {
-        av_log(filter_ctx, AV_LOG_ERROR, "input name of the model network is not specified\n");
-        return AVERROR(EINVAL);
-    }
-    ctx->model_outputnames = separate_output_names(ctx->model_outputnames_string, "&", &ctx->nb_outputs);
-    if (!ctx->model_outputnames) {
-        av_log(filter_ctx, AV_LOG_ERROR, "could not parse model output names\n");
-        return AVERROR(EINVAL);
+    if (ctx->backend_type == DNN_TF) {
+        if (!ctx->model_inputname) {
+            av_log(filter_ctx, AV_LOG_ERROR, "input name of the model network is not specified\n");
+            return AVERROR(EINVAL);
+        }
+        ctx->model_outputnames = separate_output_names(ctx->model_outputnames_string, "&", &ctx->nb_outputs);
+        if (!ctx->model_outputnames) {
+            av_log(filter_ctx, AV_LOG_ERROR, "could not parse model output names\n");
+            return AVERROR(EINVAL);
+        }
     }
     ctx->dnn_module = ff_get_dnn_module(ctx->backend_type, filter_ctx);
@@ -113,8 +115,9 @@ int ff_dnn_get_input(DnnContext *ctx, DNNData *input)
 int ff_dnn_get_output(DnnContext *ctx, int input_width, int input_height, int *output_width, int *output_height)
 {
+    char * output_name = ctx->model_outputnames ? ctx->model_outputnames[0] : NULL;
     return ctx->model->get_output(ctx->model->model, ctx->model_inputname, input_width, input_height,
-                                  (const char *)ctx->model_outputnames[0], output_width, output_height);
+                                  (const char *)output_name, output_width, output_height);
 }
 int ff_dnn_execute_model(DnnContext *ctx, AVFrame *in_frame, AVFrame *out_frame)