/* DNN inference functions interface for TensorFlow backend (excerpts). */

#include <tensorflow/c/c_api.h>

/* Read a frozen TensorFlow GraphDef from disk into a TF_Buffer. */
static TF_Buffer *read_graph(const char *model_filename)
{
    TF_Buffer *graph_buf;
    unsigned char *graph_data = NULL;
    AVIOContext *model_file_context;
    long size, bytes_read;

    /* ... open model_filename with avio_open(AVIO_FLAG_READ), take
       avio_size(), and allocate graph_data to hold the whole file ... */

    bytes_read = avio_read(model_file_context, graph_data, size);
    avio_closep(&model_file_context);
    if (bytes_read != size){
        av_freep(&graph_data);
        return NULL;
    }

    /* Hand the raw bytes to TensorFlow together with a deallocator, so the
       TF_Buffer owns them from here on. */
    graph_buf = TF_NewBuffer();
    graph_buf->data = (void *)graph_data;
    graph_buf->length = size;
    graph_buf->data_deallocator = free_buffer;

    return graph_buf;
}
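/*
 * read_graph() hands ownership of graph_data to the TF_Buffer, so the
 * buffer's data_deallocator must release the bytes with the matching FFmpeg
 * allocator. A minimal sketch of that callback (in the full file it is
 * defined just above read_graph()):
 */
static void free_buffer(void *data, size_t length)
{
    av_freep(&data); /* length is unused; data came from av_malloc() */
}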
/* Allocate the model input as a 4-D NHWC tensor with batch size fixed to 1. */
static TF_Tensor *allocate_input_tensor(const DNNData *input)
{
    /* ... map input->dt to a TF_DataType dt plus a per-element size, and set
       input_dims[] = {1, input->height, input->width, input->channels} ... */
    return TF_AllocateTensor(dt, input_dims, 4,
                             input_dims[1] * input_dims[2] * input_dims[3] * size);
}
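/*
 * For illustration, a self-contained float-only variant of the allocation
 * above; alloc_float_nhwc() is a hypothetical name, not part of the file,
 * and TF_DataTypeSize() stands in for the per-element size the real code
 * derives from input->dt:
 */
static TF_Tensor *alloc_float_nhwc(int height, int width, int channels)
{
    int64_t dims[4] = {1, height, width, channels}; /* NHWC, batch fixed to 1 */
    size_t bytes = (size_t)height * width * channels * TF_DataTypeSize(TF_FLOAT);
    return TF_AllocateTensor(TF_FLOAT, dims, 4, bytes);
}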
/* Query the type and NHWC shape of the graph operation named input_name. */
static DNNReturnType get_input_tf(void *model, DNNData *input, const char *input_name)
{
    TFModel *tf_model = (TFModel *)model;
    TF_Status *status;
    int64_t dims[4];
    TF_Output tf_output;

    tf_output.oper = TF_GraphOperationByName(tf_model->graph, input_name);
    if (!tf_output.oper)
        return DNN_ERROR;

    tf_output.index = 0;
    input->dt = TF_OperationOutputType(tf_output);

    status = TF_NewStatus();
    TF_GraphGetTensorShape(tf_model->graph, tf_output, dims, 4, status);
    if (TF_GetCode(status) != TF_OK){
        TF_DeleteStatus(status);
        return DNN_ERROR;
    }
    TF_DeleteStatus(status);

    /* The graph input is assumed to be NHWC with batch size 1. */
    input->height = dims[1];
    input->width = dims[2];
    input->channels = dims[3];

    return DNN_SUCCESS;
}
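/*
 * The TFModel handle used above and below is not part of these excerpts.
 * Reconstructed from the fields this file touches (member order assumed):
 */
typedef struct TFModel {
    TF_Graph *graph;            /* imported or converted graph */
    TF_Session *session;        /* created in set_input_output_tf() */
    TF_Status *status;          /* shared status object for TF calls */
    TF_Output input;            /* bound input operation */
    TF_Tensor *input_tensor;    /* allocated by allocate_input_tensor() */
    TF_Output *outputs;         /* bound output operations */
    TF_Tensor **output_tensors; /* filled by TF_SessionRun() */
    uint32_t nb_output;
} TFModel;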
/* Bind the named input/output operations and (re)create the session. */
static DNNReturnType set_input_output_tf(void *model, DNNData *input, const char *input_name,
                                         const char **output_names, uint32_t nb_output)
{
    TFModel *tf_model = (TFModel *)model;
    TF_SessionOptions *sess_opts;
    const TF_Operation *init_op = TF_GraphOperationByName(tf_model->graph, "init");

    /* Input operation. */
    tf_model->input.oper = TF_GraphOperationByName(tf_model->graph, input_name);
    if (!tf_model->input.oper){
        return DNN_ERROR;
    }
    tf_model->input.index = 0;

    /* ... allocate the input tensor with allocate_input_tensor() and the
       outputs/output_tensors arrays for nb_output entries ... */

    /* Output operations. */
    for (uint32_t i = 0; i < nb_output; ++i){
        tf_model->outputs[i].oper = TF_GraphOperationByName(tf_model->graph, output_names[i]);
        if (!tf_model->outputs[i].oper){
            return DNN_ERROR;
        }
        tf_model->outputs[i].index = 0;
    }

    /* ... close and delete any session left from a previous call ... */

    sess_opts = TF_NewSessionOptions();
    tf_model->session = TF_NewSession(tf_model->graph, sess_opts, tf_model->status);
    TF_DeleteSessionOptions(sess_opts);
    if (TF_GetCode(tf_model->status) != TF_OK)
        return DNN_ERROR;

    /* If the graph contains an operation named "init" (e.g. variable
       initialization), run it once as a target before any inference. */
    if (init_op){
        TF_SessionRun(tf_model->session, NULL,
                      NULL, NULL, 0,
                      NULL, NULL, 0,
                      &init_op, 1, NULL, tf_model->status);
        if (TF_GetCode(tf_model->status) != TF_OK)
            return DNN_ERROR;
    }

    return DNN_SUCCESS;
}
/* Load a frozen GraphDef produced by TensorFlow into tf_model->graph. */
static DNNReturnType load_tf_model(TFModel *tf_model, const char *model_filename)
{
    TF_Buffer *graph_def;
    TF_ImportGraphDefOptions *graph_opts;

    graph_def = read_graph(model_filename);
    if (!graph_def){
        return DNN_ERROR;
    }
    tf_model->graph = TF_NewGraph();
    tf_model->status = TF_NewStatus();
    graph_opts = TF_NewImportGraphDefOptions();
    TF_GraphImportGraphDef(tf_model->graph, graph_def, graph_opts, tf_model->status);
    TF_DeleteImportGraphDefOptions(graph_opts);
    TF_DeleteBuffer(graph_def);
    if (TF_GetCode(tf_model->status) != TF_OK){
        TF_DeleteGraph(tf_model->graph);
        TF_DeleteStatus(tf_model->status);
        return DNN_ERROR;
    }

    return DNN_SUCCESS;
}
#define NAME_BUFFER_SIZE 256

/* Append one convolutional layer of the native model to the TF graph:
   Const(kernel) -> Transpose -> Conv2D -> Const(biases) -> BiasAdd ->
   activation. */
static DNNReturnType add_conv_layer(TFModel *tf_model, TF_Operation *transpose_op, TF_Operation **cur_op,
                                    ConvolutionalParams *params, const int layer)
{
    TF_Operation *op;
    TF_OperationDescription *op_desc;
    TF_Output input;
    int64_t strides[] = {1, 1, 1, 1};
    TF_Tensor *tensor;
    int64_t dims[4];
    int dims_len;
    char name_buffer[NAME_BUFFER_SIZE];
    int32_t size;

    size = params->input_num * params->output_num * params->kernel_size * params->kernel_size;
    input.index = 0;

    /* Kernel constant, in the native {out, kh, kw, in} layout. */
    /* ... snprintf() a per-layer unique name into name_buffer ... */
    op_desc = TF_NewOperation(tf_model->graph, "Const", name_buffer);
    TF_SetAttrType(op_desc, "dtype", TF_FLOAT);
    /* ... dims[] = {output_num, kernel_size, kernel_size, input_num}, dims_len = 4 ... */
    tensor = TF_AllocateTensor(TF_FLOAT, dims, dims_len, size * sizeof(float));
    memcpy(TF_TensorData(tensor), params->kernel, size * sizeof(float));
    TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }
    op = TF_FinishOperation(op_desc, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }

    /* Transpose the kernel with the shared {1, 2, 3, 0} permutation, turning
       {out, kh, kw, in} into the {kh, kw, in, out} (HWIO) layout Conv2D expects. */
    op_desc = TF_NewOperation(tf_model->graph, "Transpose", name_buffer);
    input.oper = op;
    TF_AddInput(op_desc, input);
    input.oper = transpose_op;
    TF_AddInput(op_desc, input);
    TF_SetAttrType(op_desc, "T", TF_FLOAT);
    TF_SetAttrType(op_desc, "Tperm", TF_INT32);
    op = TF_FinishOperation(op_desc, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }

    /* The convolution itself: unit strides and no implicit padding, since
       explicit pad layers handle the borders. */
    op_desc = TF_NewOperation(tf_model->graph, "Conv2D", name_buffer);
    input.oper = *cur_op;
    TF_AddInput(op_desc, input);
    input.oper = op;
    TF_AddInput(op_desc, input);
    TF_SetAttrType(op_desc, "T", TF_FLOAT);
    TF_SetAttrIntList(op_desc, "strides", strides, 4);
    TF_SetAttrString(op_desc, "padding", "VALID", 5);
    *cur_op = TF_FinishOperation(op_desc, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }

    /* Bias constant: one float per output channel. */
    op_desc = TF_NewOperation(tf_model->graph, "Const", name_buffer);
    TF_SetAttrType(op_desc, "dtype", TF_FLOAT);
    /* ... dims[0] = output_num, dims_len = 1 ... */
    tensor = TF_AllocateTensor(TF_FLOAT, dims, dims_len, params->output_num * sizeof(float));
    memcpy(TF_TensorData(tensor), params->biases, params->output_num * sizeof(float));
    TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }
    op = TF_FinishOperation(op_desc, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }

    op_desc = TF_NewOperation(tf_model->graph, "BiasAdd", name_buffer);
    input.oper = *cur_op;
    TF_AddInput(op_desc, input);
    input.oper = op;
    TF_AddInput(op_desc, input);
    TF_SetAttrType(op_desc, "T", TF_FLOAT);
    *cur_op = TF_FinishOperation(op_desc, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }

    /* Activation, selected by the layer's DNNActivationFunc. */
    switch (params->activation){
    case RELU:
        op_desc = TF_NewOperation(tf_model->graph, "Relu", name_buffer);
        break;
    case TANH:
        op_desc = TF_NewOperation(tf_model->graph, "Tanh", name_buffer);
        break;
    case SIGMOID:
        op_desc = TF_NewOperation(tf_model->graph, "Sigmoid", name_buffer);
        break;
    default:
        return DNN_ERROR;
    }
    input.oper = *cur_op;
    TF_AddInput(op_desc, input);
    TF_SetAttrType(op_desc, "T", TF_FLOAT);
    *cur_op = TF_FinishOperation(op_desc, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }

    return DNN_SUCCESS;
}
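/*
 * The Const -> set attrs -> TF_FinishOperation() sequence above repeats for
 * every weight tensor. The pattern in isolation (add_const_float() is a
 * hypothetical helper, not in the file):
 */
static TF_Operation *add_const_float(TF_Graph *graph, TF_Status *status,
                                     const char *name, const float *values,
                                     const int64_t *dims, int num_dims,
                                     size_t count)
{
    TF_OperationDescription *op_desc = TF_NewOperation(graph, "Const", name);
    TF_Tensor *tensor = TF_AllocateTensor(TF_FLOAT, dims, num_dims,
                                          count * sizeof(float));
    memcpy(TF_TensorData(tensor), values, count * sizeof(float));
    TF_SetAttrType(op_desc, "dtype", TF_FLOAT);
    TF_SetAttrTensor(op_desc, "value", tensor, status);
    if (TF_GetCode(status) != TF_OK)
        return NULL;
    return TF_FinishOperation(op_desc, status);
}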
/* Append a DepthToSpace (sub-pixel upscale) layer: with block size b, a
   1 x H x W x C tensor becomes 1 x bH x bW x C/(b*b). */
static DNNReturnType add_depth_to_space_layer(TFModel *tf_model, TF_Operation **cur_op,
                                              DepthToSpaceParams *params, const int layer)
{
    TF_OperationDescription *op_desc;
    TF_Output input;
    char name_buffer[NAME_BUFFER_SIZE];

    /* ... snprintf() a per-layer unique name into name_buffer ... */
    op_desc = TF_NewOperation(tf_model->graph, "DepthToSpace", name_buffer);
    input.oper = *cur_op;
    input.index = 0;
    TF_AddInput(op_desc, input);
    TF_SetAttrType(op_desc, "T", TF_FLOAT);
    TF_SetAttrInt(op_desc, "block_size", params->block_size);
    *cur_op = TF_FinishOperation(op_desc, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }

    return DNN_SUCCESS;
}
/* Append a mirror-pad layer (the native equivalent of tf.pad): a Const
   holding the 4x2 paddings tensor feeds a MirrorPad in SYMMETRIC mode. */
static DNNReturnType add_pad_layer(TFModel *tf_model, TF_Operation **cur_op,
                                   LayerPadParams *params, const int layer)
{
    TF_Operation *op;
    TF_Tensor *tensor;
    TF_OperationDescription *op_desc;
    TF_Output input;
    int32_t *pads;
    int64_t pads_shape[] = {4, 2};
    char name_buffer[NAME_BUFFER_SIZE];

    /* ... snprintf() a per-layer unique name into name_buffer ... */
    op_desc = TF_NewOperation(tf_model->graph, "Const", name_buffer);
    TF_SetAttrType(op_desc, "dtype", TF_INT32);
    tensor = TF_AllocateTensor(TF_INT32, pads_shape, 2, 4 * 2 * sizeof(int32_t));
    pads = (int32_t *)TF_TensorData(tensor);
    /* Rows are the N, H, W, C dimensions; columns are (before, after). */
    pads[0] = params->paddings[0][0]; pads[1] = params->paddings[0][1];
    pads[2] = params->paddings[1][0]; pads[3] = params->paddings[1][1];
    pads[4] = params->paddings[2][0]; pads[5] = params->paddings[2][1];
    pads[6] = params->paddings[3][0]; pads[7] = params->paddings[3][1];
    TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }
    op = TF_FinishOperation(op_desc, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }

    op_desc = TF_NewOperation(tf_model->graph, "MirrorPad", "mirror_pad");
    input.oper = *cur_op;
    input.index = 0;
    TF_AddInput(op_desc, input);
    input.oper = op;
    TF_AddInput(op_desc, input);
    TF_SetAttrType(op_desc, "T", TF_FLOAT);
    TF_SetAttrType(op_desc, "Tpaddings", TF_INT32);
    TF_SetAttrString(op_desc, "mode", "SYMMETRIC", 9);
    *cur_op = TF_FinishOperation(op_desc, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }

    return DNN_SUCCESS;
}
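/*
 * Worked example of the SYMMETRIC mode used above: each border is mirrored
 * including the edge sample itself, so padding the row {1, 2, 3} with
 * (before = 2, after = 1) yields {2, 1, 1, 2, 3, 3}.
 */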
/* Append a maximum layer: out = max(in, y) element-wise, with y a scalar
   constant broadcast over the whole tensor. */
static DNNReturnType add_maximum_layer(TFModel *tf_model, TF_Operation **cur_op,
                                       DnnLayerMaximumParams *params, const int layer)
{
    TF_Operation *op;
    TF_Tensor *tensor;
    TF_OperationDescription *op_desc;
    TF_Output input;
    float *y;
    char name_buffer[NAME_BUFFER_SIZE];

    /* Scalar constant: NULL dims and rank 0 make this a 0-D tensor. */
    /* ... snprintf() a per-layer unique name into name_buffer ... */
    op_desc = TF_NewOperation(tf_model->graph, "Const", name_buffer);
    TF_SetAttrType(op_desc, "dtype", TF_FLOAT);
    tensor = TF_AllocateTensor(TF_FLOAT, NULL, 0, TF_DataTypeSize(TF_FLOAT));
    y = (float *)TF_TensorData(tensor);
    *y = params->val.y; /* threshold from the native layer's val union */
    TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }
    op = TF_FinishOperation(op_desc, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }

    /* ... snprintf() another unique name for the Maximum op ... */
    op_desc = TF_NewOperation(tf_model->graph, "Maximum", name_buffer);
    input.oper = *cur_op;
    input.index = 0;
    TF_AddInput(op_desc, input);
    input.oper = op;
    TF_AddInput(op_desc, input);
    TF_SetAttrType(op_desc, "T", TF_FLOAT);
    *cur_op = TF_FinishOperation(op_desc, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }

    return DNN_SUCCESS;
}
/* Build a TF graph from a model in FFmpeg's native format: load it with the
   native backend, then replay each layer into the TF graph. */
static DNNReturnType load_native_model(TFModel *tf_model, const char *model_filename)
{
    int32_t layer;
    TF_OperationDescription *op_desc;
    TF_Operation *op;
    TF_Operation *transpose_op;
    TF_Tensor *tensor;
    TF_Output input;
    int32_t *transpose_perm;
    int64_t transpose_perm_shape[] = {4};
    int64_t input_shape[] = {1, -1, -1, -1};
    ConvolutionalNetwork *conv_network;

    /* ... load the model with ff_dnn_load_model_native() and take its
       ConvolutionalNetwork as conv_network ... */

    tf_model->graph = TF_NewGraph();
    tf_model->status = TF_NewStatus();

#define CLEANUP_ON_ERROR(tf_model) \
    { \
        TF_DeleteGraph(tf_model->graph); \
        TF_DeleteStatus(tf_model->status); \
        return DNN_ERROR; \
    }

    /* Input placeholder named "x": batch 1, dynamic height/width/channels. */
    op_desc = TF_NewOperation(tf_model->graph, "Placeholder", "x");
    TF_SetAttrType(op_desc, "dtype", TF_FLOAT);
    TF_SetAttrShape(op_desc, "shape", input_shape, 4);
    op = TF_FinishOperation(op_desc, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        CLEANUP_ON_ERROR(tf_model);
    }

    /* One shared permutation constant, reused by every conv layer to turn
       native {out, kh, kw, in} kernels into Conv2D's HWIO layout. */
    op_desc = TF_NewOperation(tf_model->graph, "Const", "transpose_perm");
    TF_SetAttrType(op_desc, "dtype", TF_INT32);
    tensor = TF_AllocateTensor(TF_INT32, transpose_perm_shape, 1, 4 * sizeof(int32_t));
    transpose_perm = (int32_t *)TF_TensorData(tensor);
    transpose_perm[0] = 1;
    transpose_perm[1] = 2;
    transpose_perm[2] = 3;
    transpose_perm[3] = 0;
    TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        CLEANUP_ON_ERROR(tf_model);
    }
    transpose_op = TF_FinishOperation(op_desc, tf_model->status);

    for (layer = 0; layer < conv_network->layers_num; ++layer){
        /* ... dispatch on the layer type to add_conv_layer(),
           add_depth_to_space_layer(), add_pad_layer() or add_maximum_layer(),
           each appending its ops and advancing the running output op ... */
    }

    /* Output alias named "y", so callers can address the result by name. */
    op_desc = TF_NewOperation(tf_model->graph, "Identity", "y");
    /* ... input.oper = last appended op, input.index = 0 ... */
    TF_AddInput(op_desc, input);
    TF_FinishOperation(op_desc, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        CLEANUP_ON_ERROR(tf_model);
    }

    return DNN_SUCCESS;
}
/* Public entry point: wrap a TFModel in the generic DNNModel interface.
   Tries the TensorFlow GraphDef format first, then falls back to converting
   a native-format model via load_native_model(). */
DNNModel *ff_dnn_load_model_tf(const char *model_filename)
{
    /* ... av_mallocz() the DNNModel and TFModel; run load_tf_model(), then
       load_native_model() on failure; free both and return NULL if neither
       loads ... */
    model->model = (void *)tf_model;
    model->set_input_output = &set_input_output_tf;
    model->get_input = &get_input_tf;

    return model;
}
/* Run inference: feed the bound input tensor, fetch nb output tensors, and
   expose their data and NHWC dimensions through outputs[]. */
DNNReturnType ff_dnn_execute_model_tf(const DNNModel *model, DNNData *outputs, uint32_t nb_output)
{
    TFModel *tf_model = (TFModel *)model->model;
    uint32_t nb = FFMIN(nb_output, tf_model->nb_output);

    /* ... delete any output tensors left over from a previous run ... */

    TF_SessionRun(tf_model->session, NULL,
                  &tf_model->input, &tf_model->input_tensor, 1,
                  tf_model->outputs, tf_model->output_tensors, nb,
                  NULL, 0, NULL, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }

    for (uint32_t i = 0; i < nb; ++i) {
        outputs[i].height = TF_Dim(tf_model->output_tensors[i], 1);
        outputs[i].width = TF_Dim(tf_model->output_tensors[i], 2);
        outputs[i].channels = TF_Dim(tf_model->output_tensors[i], 3);
        outputs[i].data = TF_TensorData(tf_model->output_tensors[i]);
        outputs[i].dt = TF_TensorType(tf_model->output_tensors[i]);
    }

    return DNN_SUCCESS;
}
/* Tear down everything the load/exec path created. */
void ff_dnn_free_model_tf(DNNModel **model)
{
    TFModel *tf_model;

    if (*model){
        tf_model = (TFModel *)(*model)->model;
        if (tf_model->graph){
            TF_DeleteGraph(tf_model->graph);
        }
        /* ... close and delete the session, delete the input tensor and any
           output tensors, and free the outputs arrays ... */
        if (tf_model->status){
            TF_DeleteStatus(tf_model->status);
        }
        av_freep(&tf_model);
        av_freep(model);
    }
}
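/*
 * End-to-end usage of the public entry points above, as a sketch: the model
 * path and the "x"/"y" tensor names are placeholders (they match what
 * load_native_model() generates), error handling is shortened, and the
 * DNNData field names follow their use earlier in this file.
 */
static int run_tf_model_example(const char *path)
{
    DNNModel *model = ff_dnn_load_model_tf(path);
    DNNData input = {0}, output = {0};
    const char *output_names[] = {"y"};

    if (!model)
        return -1;
    if (model->get_input(model->model, &input, "x") != DNN_SUCCESS)
        goto fail;
    input.height = 240; /* pick concrete sizes if the graph's are dynamic */
    input.width  = 320;
    if (model->set_input_output(model->model, &input, "x", output_names, 1) != DNN_SUCCESS)
        goto fail;
    /* input.data now points into the bound TF input tensor: fill it here,
       then run inference. */
    if (ff_dnn_execute_model_tf(model, &output, 1) != DNN_SUCCESS)
        goto fail;
    /* output.data and output.width/height/channels describe the result. */
    ff_dnn_free_model_tf(&model);
    return 0;
fail:
    ff_dnn_free_model_tf(&model);
    return -1;
}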