你的浏览器版本过低,可能导致网站不能正常访问!
为了你能正常使用网站功能,请使用这些浏览器。

CubeMX AI 转换 STM32H7

[复制链接]
xiaoyao12138 提问时间:2020-12-8 11:22 /
stm32h747xi芯片
cubemax导入模型生成成功,但代码中app_x-cube-ai.c只有以下报错信息,望哪位仁兄可以拯救下我。
报错:

The following has evaluated to null or missing:
==> ModelNameList  [in template "app_x-cube-ai_c.ftl" at line 296, column 20]

----
Tip: If the failing expression is known to legally refer to something that's sometimes null or missing, either specify a default value like myOptionalVar!myDefault, or use [#if myOptionalVar??]when-present[#else]when-missing[/#if]. (These only cover the last step of the expression; to cover the whole expression, use parenthesis: (myOptionalVar.foo)!myDefault, (myOptionalVar.foo)??
----

----
FTL stack trace ("~" means nesting-related):
        - Failed at: ${ModelNameList[0]}  [in template "app_x-cube-ai_c.ftl" at line 296, column 18]
----

Java stack trace (for programmers):
----
freemarker.core.InvalidReferenceException: [... Exception message was already printed; see it above ...]
        at freemarker.core.InvalidReferenceException.getInstance(InvalidReferenceException.java:134)


收藏 评论4 发布时间:2020-12-8 11:22

举报

4个回答
废鱼 回答时间:2020-12-8 13:35:41
楼主是否方便把这个文件上传一下,in template "app_x-cube-ai_c.ftl" at line 296, column 20。看一下这里是不是有语法错误。
xiaoyao12138 回答时间:2020-12-8 14:02:39
哦哦,不好意思。附件在这。
但同样的模型,我用F407ZGTX 这个芯片就可以生成啊

app_x-cube-ai_c.zip

下载

6.22 KB, 下载次数: 0, 下载积分: ST金币 -1

app_x-cube-ai_c.ftl

xiaoyao12138 回答时间:2020-12-8 14:04:55
安 发表于 2020-12-8 13:35
楼主是否方便把这个文件上传一下,in template "app_x-cube-ai_c.ftl" at line 296, column 20。看一下这里 ...

不好意思哈,没放这个代码上来但我用F407ZGTX这个芯片,同样模型就可以生成。

感觉可能不是代码问题,不懂是不是配置问题,你成功生成吗?

app_x-cube-ai_c.zip

下载

6.22 KB, 下载次数: 3, 下载积分: ST金币 -1

app_x-cube-ai_c.ftl

xiaoyao12138 回答时间:2020-12-8 17:26:55
安 发表于 2020-12-8 13:35
楼主是否方便把这个文件上传一下,in template "app_x-cube-ai_c.ftl" at line 296, column 20。看一下这里 ...

[#ftl]
[#-- Feature flags for the generated application, derived from the RTE
     component defines selected in CubeMX. --]
[#assign useAI_SYSTEM_PERFORMANCE = false]
[#assign useAI_VALIDATION = false]
[#assign useAI_TEMPLATE = false]
[#assign useAI_BSP = false]
[#if RTEdatas??]
  [#list RTEdatas as rteDefine]
    [#if rteDefine?contains("AI_SYSTEM_PERFORMANCE")]
      [#-- SystemPerformance test application: requires the BSP (UART) --]
      [#assign useAI_SYSTEM_PERFORMANCE = true]
      [#assign useAI_BSP = true]
    [/#if]
    [#if rteDefine?contains("AI_VALIDATION")]
      [#-- Validation test application: requires the BSP (UART) --]
      [#assign useAI_VALIDATION = true]
      [#assign useAI_BSP = true]
    [/#if]
    [#if rteDefine?contains("AI_ApplicationTemplate")]
      [#-- Bare application template: no BSP dependency --]
      [#assign useAI_TEMPLATE = true]
      [#assign useAI_BSP = false]
    [/#if]
  [/#list]
[/#if]

[#assign useAI_RELOC = false]

[#-- Extract the X-CUBE-AI model parameters from the CubeMX configuration.
     NOTE(review): the pack key is hard-coded to version 5.2.0. When the
     selected device/context does not expose the parameters under this key
     (e.g. a dual-core STM32H7 project context), peripheralParams.get()
     returns null; the original code let that null explode inside
     [#attempt] and the empty [#recover] silently discarded ALL
     assignments, so a later ${r"${ModelNameList[0]}"} reference failed
     with "ModelNameList has evaluated to null or missing" at generation
     time. Guard every get() with ?? instead of relying on recover. --]
[#assign AI_PACK_KEY = "STMicroelectronics.X-CUBE-AI.5.2.0"]
[#attempt]
[#list configs as config]
  [#if config.peripheralParams.get(AI_PACK_KEY)??]
    [#assign aiParams = config.peripheralParams.get(AI_PACK_KEY)]
    [#if aiParams.get("ModelNameList")??]
      [#assign ModelNameList = aiParams.get("ModelNameList").split(",")]
      [#assign NETWORK = ModelNameList[0]?upper_case]
    [/#if]
    [#if aiParams.get("relocatableNetwork")??]
      [#assign useAI_RELOC = true]
    [/#if]
    [#if aiParams.get("initFunctions")??]
      [#assign InitFunctions = aiParams.get("initFunctions").split(":")]
    [/#if]
  [/#if]
[/#list]
[#recover]
[#-- Last-resort safety net only; should no longer be reached. --]
[/#attempt]
#ifdef __cplusplus
extern "C" {
#endif
/**
  ******************************************************************************
  * @file           : app_x-cube-ai.c
  * @brief          : AI program body
  ******************************************************************************
  * This notice applies to any and all portions of this file
  * that are not between comment pairs USER CODE BEGIN and
  * USER CODE END. Other portions of this file, whether
  * inserted by the user or by software development tools
  * are owned by their respective copyright owners.
  *
  * Copyright (c) 2018 STMicroelectronics International N.V.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted, provided that the following conditions are met:
  *
  * 1. Redistribution of source code must retain the above copyright notice,
  *    this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright notice,
  *    this list of conditions and the following disclaimer in the documentation
  *    and/or other materials provided with the distribution.
  * 3. Neither the name of STMicroelectronics nor the names of other
  *    contributors to this software may be used to endorse or promote products
  *    derived from this software without specific written permission.
  * 4. This software, including modifications and/or derivative works of this
  *    software, must execute solely and exclusively on microcontroller or
  *    microprocessor devices manufactured by or for STMicroelectronics.
  * 5. Redistribution and use of this software other than as permitted under
  *    this license is void and will automatically terminate your rights under
  *    this license.
  *
  * THIS SOFTWARE IS PROVIDED BY STMICROELECTRONICS AND CONTRIBUTORS "AS IS"
  * AND ANY EXPRESS, IMPLIED OR STATUTORY WARRANTIES, INCLUDING, BUT NOT
  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
  * PARTICULAR PURPOSE AND NON-INFRINGEMENT OF THIRD PARTY INTELLECTUAL PROPERTY
  * RIGHTS ARE DISCLAIMED TO THE FULLEST EXTENT PERMITTED BY LAW. IN NO EVENT
  * SHALL STMICROELECTRONICS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
  * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
  * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  ******************************************************************************
  */
[#if useAI_TEMPLATE]
[#if useAI_RELOC]
/*
  * Description
  *   v1.0 - Basic template to show how to instantiate a relocatable binary
  *          model. Only one input and one output is supported. It illustrates
  *          how to use the ai_rel_network_XXX() API.
  *          Re-target of the printf function is out-of-scope.
  *
[#else]
/*
  * Description
  *   v1.0 - Minimum template to show how to use the Embedded Client API
  *          model. Only one input and one output is supported. All
  *          memory resources are allocated statically (AI_NETWORK_XX, defines
  *          are used).
  *          Re-target of the printf function is out-of-scope.
  *
[/#if]
  *   For more information, see the embeded documentation:
  *
  *       [1] %X_CUBE_AI_DIR%/Documentation/index.html
  *
  *   X_CUBE_AI_DIR indicates the location where the X-CUBE-AI pack is installed
  *   typical : C:\Users\<user_name>\STM32Cube\Repository\STMicroelectronics\X-CUBE-AI\5.2.0
  */
[/#if]
/* Includes ------------------------------------------------------------------*/
/* System headers */
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <inttypes.h>
#include <string.h>

#include "app_x-cube-ai.h"
[#if useAI_BSP]
#include "bsp_ai.h"
[#else]
#include "main.h"
[/#if]
[#if useAI_SYSTEM_PERFORMANCE]
#include "aiSystemPerformance.h"
[/#if]
[#if useAI_VALIDATION]
#include "aiValidation.h"
[/#if]
#include "ai_datatypes_defines.h"

/* USER CODE BEGIN includes */
/* USER CODE END includes */
[#if useAI_TEMPLATE]
[#if useAI_RELOC]
#include <ai_reloc_network.h>

/* Global AI objects */
static ai_handle ${ModelNameList[0]} = AI_HANDLE_NULL;
static ai_network_report ${ModelNameList[0]}_info;
static ai_rel_network_info ${ModelNameList[0]}_rt_info;

/* Report an AI runtime error and halt.
 * err : error descriptor returned by an X-CUBE-AI API call
 * fct : name of the failing API function (may be NULL)
 * NOTE: deliberately never returns - spins forever after printing. */
static void ai_log_err(const ai_error err, const char *fct)
{
  /* USER CODE BEGIN 0 */
  if (fct)
    printf("TEMPLATE - Error (%s) - type=0x%02x code=0x%02x\r\n", fct,
        err.type, err.code);
  else
    printf("TEMPLATE - Error - type=0x%02x code=0x%02x\r\n", err.type, err.code);

  do {} while (1);
  /* USER CODE END 0 */
}


/* Instantiate and initialize the relocatable network (XIP mode).
 * obj      : address of the relocatable binary object
 * ram_addr : RT RAM buffer where the runtime installs the model
 * ram_sz   : size of the RT RAM buffer in bytes
 * w_addr   : weights address used when the binary embeds no weights
 * act_addr : activations buffer
 * Returns 0 on success, -1/-2/-3 identifying the failing step.
 * NOTE(review): "boostrap" (sic) is the spelling used by the caller of
 * this template; keep it. Assumes ${ModelNameList[0]}_rt_info was
 * populated beforehand by ai_rel_network_rt_get_info() (done in the
 * generated init function). */
static int ai_boostrap(const uint8_t *obj, uint8_t *ram_addr,
    ai_size ram_sz, ai_handle w_addr, ai_handle act_addr)
{
  ai_error err;
  ai_handle weights_addr;

  /* 1 - Create an instance (XIP mode)*/
  err = ai_rel_network_load_and_create(obj,
      ram_addr, ram_sz, AI_RELOC_RT_LOAD_MODE_XIP,
      &${ModelNameList[0]});
  if (err.type != AI_ERROR_NONE) {
    ai_log_err(err, "ai_rel_network_load_and_create");
    return -1;
  }

  /* 2 - Initialize the created instance */
  /* Prefer weights embedded in the relocatable binary, else the caller's */
  if (${ModelNameList[0]}_rt_info.weights)
    weights_addr = ${ModelNameList[0]}_rt_info.weights;
  else
    weights_addr = w_addr;

  if (!ai_rel_network_init(${ModelNameList[0]},
      weights_addr, act_addr)) {
    err = ai_rel_network_get_error(${ModelNameList[0]});
    ai_log_err(err, "ai_rel_network_init");
    ai_rel_network_destroy(${ModelNameList[0]});
    ${ModelNameList[0]} = AI_HANDLE_NULL;
    return -2;
  }

  /* 3 - Retrieve the network info of the created instance */
  if (!ai_rel_network_get_info(${ModelNameList[0]}, &${ModelNameList[0]}_info)) {
    err = ai_rel_network_get_error(${ModelNameList[0]});
    ai_log_err(err, "ai_rel_network_get_info");
    ai_rel_network_destroy(${ModelNameList[0]});
    ${ModelNameList[0]} = AI_HANDLE_NULL;
    return -3;
  }

  return 0;
}


/* Run one inference on the relocatable network.
 * data_in/data_out: payload buffers bound to the single input/output.
 * Returns 0 on success, -1 on failure (error already reported). */
static int ai_run(void *data_in, void *data_out)
{
  ai_buffer *inputs  = ${ModelNameList[0]}_info.inputs;
  ai_buffer *outputs = ${ModelNameList[0]}_info.outputs;

  inputs[0].data  = AI_HANDLE_PTR(data_in);
  outputs[0].data = AI_HANDLE_PTR(data_out);

  const ai_i32 n_batch = ai_rel_network_run(${ModelNameList[0]}, inputs, outputs);
  if (n_batch != 1) {
    ai_log_err(ai_rel_network_get_error(${ModelNameList[0]}),
        "ai_rel_network_run");
    return -1;
  }

  return 0;
}

/* USER CODE BEGIN 2 */
/* Acquire and pre-process one input sample into `data`.
 * Stub: returns 0 ("success") so the demo loop keeps running. */
int acquire_and_process_data(void * data)
{
  return 0;
}

/* Post-process the predictions available in `data`.
 * Stub: returns 0 ("success"). */
int post_process(void * data)
{
  return 0;
}
/* USER CODE END 2 */

/* USER CODE BEGIN 10 */

/*
* The following code is based on the generated/specific
* network_img_rel.h/.c files. These files include a
* image of the relocatable binary image as a
* C-table which is included in the firmware image.
* This is a facility to use the model.
*
* For a normal case with the end-user application which
* requires the capability to update the model w/o
* re-flash the whole firmware (FOTA-like mechanism),
* the BIN_ADDRESS should be provided by the APP module in charge
* to update the model. It can be a fixed address.
* The requested RT RAM buffer (RAM_RT_ADDRESS/RAM_RT_SIZE)
* to instantiate the network can be also fixed or dynamically
* allocated. As for the management of the
* activations buffer, the sizes of the RT RAM and the activations
* buffer are model dependent.
*/

/* Include the image of the relocatable network */
#include <${ModelNameList[0]}_img_rel.h>

#define BIN_ADDRESS ai_${ModelNameList[0]}_reloc_img_get()

/* Allocate the rt ram buffer to install a model
*  - it should be aligned on 4-bytes
*/
AI_ALIGNED(4)
uint8_t reloc_rt_ram[AI_${NETWORK}_RELOC_RAM_SIZE_XIP];

#define RAM_RT_ADDRESS reloc_rt_ram
#define RAM_RT_SIZE    AI_${NETWORK}_RELOC_RAM_SIZE_XIP

/* Allocate the activations buffer to use the model
*  - it should be aligned on 4-bytes
*/
AI_ALIGNED(4)
uint8_t activations[AI_${NETWORK}_RELOC_ACTIVATIONS_SIZE];

#define ACT_ADDR  activations
#define ACT_SIZE  AI_${NETWORK}_RELOC_ACTIVATIONS_SIZE

/* In the case where the weights are not included in the
* binary object (generated in a separate file), the
* @ should be passed to initialize the instance.
*  - it should be aligned on 4-bytes
*/
#define WEIGHTS_ADD  0 /* 0: weights are embedded in the binary object */
#define WEIGHTS_SIZE AI_${NETWORK}_RELOC_WEIGHTS_SIZE

/* USER CODE END 10 */


[#else]

/* Global AI objects */
/* ${ModelNameList[0]} is the opaque handle of the single generated network */
static ai_handle ${ModelNameList[0]} = AI_HANDLE_NULL;
static ai_network_report ${ModelNameList[0]}_info;

/* Global c-array to handle the activations buffer */
AI_ALIGNED(4)
static ai_u8 activations[AI_${NETWORK}_DATA_ACTIVATIONS_SIZE];

/*  In the case where "--allocate-inputs" option is used, memory buffer can be
*  used from the activations buffer. This is not mandatory.
*/
#if !defined(AI_${NETWORK}_INPUTS_IN_ACTIVATIONS)
/* Allocate data payload for input tensor */
AI_ALIGNED(4)
static ai_u8 in_data_s[AI_${NETWORK}_IN_1_SIZE_BYTES];
#endif

/*  In the case where "--allocate-outputs" option is used, memory buffer can be
*  used from the activations buffer. This is not mandatory.
*/
#if !defined(AI_${NETWORK}_OUTPUTS_IN_ACTIVATIONS)
/* Allocate data payload for the output tensor */
AI_ALIGNED(4)
static ai_u8 out_data_s[AI_${NETWORK}_OUT_1_SIZE_BYTES];
#endif

/* Report an AI runtime error and halt.
 * err : error descriptor returned by an X-CUBE-AI API call
 * fct : name of the failing API function (may be NULL)
 * NOTE: deliberately never returns - spins forever after printing. */
static void ai_log_err(const ai_error err, const char *fct)
{
  /* USER CODE BEGIN 0 */
  if (fct)
    printf("TEMPLATE - Error (%s) - type=0x%02x code=0x%02x\r\n", fct,
        err.type, err.code);
  else
    printf("TEMPLATE - Error - type=0x%02x code=0x%02x\r\n", err.type, err.code);

  do {} while (1);
  /* USER CODE END 0 */
}

/* Instantiate and initialize the statically allocated network.
 * w_addr   : weights address
 * act_addr : activations buffer address
 * Returns 0 on success, a negative step-specific code on failure; the
 * instance is released and the global handle reset on any failure.
 * NOTE(review): "boostrap" (sic) matches the caller's spelling; keep it. */
static int ai_boostrap(ai_handle w_addr, ai_handle act_addr)
{
  ai_error err;

  /* 1 - Create an instance of the model */
  err = ai_${ModelNameList[0]}_create(&${ModelNameList[0]}, AI_${NETWORK}_DATA_CONFIG);
  if (err.type != AI_ERROR_NONE) {
    ai_log_err(err, "ai_${ModelNameList[0]}_create");
    return -1;
  }

  /* 2 - Initialize the instance */
  const ai_network_params params = {
      AI_${NETWORK}_DATA_WEIGHTS(w_addr),
      AI_${NETWORK}_DATA_ACTIVATIONS(act_addr) };

  if (!ai_${ModelNameList[0]}_init(${ModelNameList[0]}, &params)) {
    err = ai_${ModelNameList[0]}_get_error(${ModelNameList[0]});
    ai_log_err(err, "ai_${ModelNameList[0]}_init");
    /* Fix: release the instance created in step 1 - the original code
     * leaked it and left a dangling, half-initialized handle (the
     * relocatable twin of this function does destroy it). */
    ai_${ModelNameList[0]}_destroy(${ModelNameList[0]});
    ${ModelNameList[0]} = AI_HANDLE_NULL;
    return -2;
  }

  /* 3 - Retrieve the network info of the created instance */
  if (!ai_${ModelNameList[0]}_get_info(${ModelNameList[0]}, &${ModelNameList[0]}_info)) {
    err = ai_${ModelNameList[0]}_get_error(${ModelNameList[0]});
    /* Fix: report the function that actually failed (was "_get_error") */
    ai_log_err(err, "ai_${ModelNameList[0]}_get_info");
    ai_${ModelNameList[0]}_destroy(${ModelNameList[0]});
    ${ModelNameList[0]} = AI_HANDLE_NULL;
    return -3;
  }

  return 0;
}

/* Run one inference on the statically allocated network.
 * data_in/data_out: payload buffers bound to the single input/output.
 * Returns 0 on success, -1 on failure (error already reported). */
static int ai_run(void *data_in, void *data_out)
{
  ai_buffer *inputs  = ${ModelNameList[0]}_info.inputs;
  ai_buffer *outputs = ${ModelNameList[0]}_info.outputs;

  inputs[0].data  = AI_HANDLE_PTR(data_in);
  outputs[0].data = AI_HANDLE_PTR(data_out);

  const ai_i32 n_batch = ai_${ModelNameList[0]}_run(${ModelNameList[0]}, inputs, outputs);
  if (n_batch != 1) {
    ai_log_err(ai_${ModelNameList[0]}_get_error(${ModelNameList[0]}),
        "ai_${ModelNameList[0]}_run");
    return -1;
  }

  return 0;
}

/* USER CODE BEGIN 2 */
/* Acquire and pre-process one input sample into `data`.
 * Stub: returns 0 ("success") so the demo loop keeps running. */
int acquire_and_process_data(void * data)
{
  return 0;
}

/* Post-process the predictions available in `data`.
 * Stub: returns 0 ("success"). */
int post_process(void * data)
{
  return 0;
}
/* USER CODE END 2 */

[/#if]

[/#if]

/*************************************************************************
  *
  */
[#-- Generated init entry point; the name is injected by CubeMX via
     ${r"${fctName}"} (typically MX_X_CUBE_AI_Init). --]
void ${fctName}(void)
{
[#if useAI_BSP]
    MX_UARTx_Init();
[/#if]
[#if InitFunctions??]
  [#list InitFunctions as initFunction]
    [#if initFunction?has_content]
      [#if initFunction?ends_with(".ftl")]
        [#include initFunction]
      [#else]
    ${initFunction}
      [/#if]
    [/#if]
  [/#list]
[/#if]
[#if useAI_SYSTEM_PERFORMANCE]
    aiSystemPerformanceInit();
[/#if]
[#if useAI_VALIDATION]
    aiValidationInit();
[/#if]
    /* USER CODE BEGIN 3 */
[#if useAI_TEMPLATE]
[#if useAI_RELOC]
  ai_error err;

  printf("\r\nTEMPLATE RELOC - initialization\r\n");

  /* Read the run-time descriptor embedded in the relocatable binary */
  err = ai_rel_network_rt_get_info(BIN_ADDRESS, &${ModelNameList[0]}_rt_info);
  if (err.type != AI_ERROR_NONE) {
    ai_log_err(err, "ai_rel_network_rt_get_info");
  } else {
    /* USER CODE BEGIN 11 */
    printf("Load a relocatable binary model, located at the address 0x%08x\r\n",
        (int)BIN_ADDRESS);
    printf(" model name                : %s\r\n", ${ModelNameList[0]}_rt_info.c_name);
    printf(" weights size              : %d bytes\r\n", (int)${ModelNameList[0]}_rt_info.weights_sz);
    printf(" activations size          : %d bytes (minimum)\r\n", (int)${ModelNameList[0]}_rt_info.acts_sz);
    printf(" compiled for a Cortex-Mx  : 0x%03X\r\n", (int)AI_RELOC_RT_GET_CPUID(${ModelNameList[0]}_rt_info.variant));
    printf(" FPU should be enabled     : %s\r\n", AI_RELOC_RT_FPU_USED(${ModelNameList[0]}_rt_info.variant)?"yes":"no");
    printf(" RT RAM minimum size       : %d bytes (%d bytes in COPY mode)\r\n", (int)${ModelNameList[0]}_rt_info.rt_ram_xip,
        (int)${ModelNameList[0]}_rt_info.rt_ram_copy);
    /* USER CODE END 11 */

    /* Return value deliberately ignored: ai_boostrap() halts on error */
    ai_boostrap(BIN_ADDRESS, RAM_RT_ADDRESS, RAM_RT_SIZE,
        WEIGHTS_ADD, ACT_ADDR);
  }
[#else]
  printf("\r\nTEMPLATE - initialization\r\n");

  ai_boostrap(ai_${ModelNameList[0]}_data_weights_get(), activations);
[/#if]
[/#if]
    /* USER CODE END 3 */
}

[#-- Generated process entry point; the name is injected by CubeMX via
     ${r"${fctProcessName}"} (typically MX_X_CUBE_AI_Process). --]
void ${fctProcessName}(void)
{
[#if useAI_SYSTEM_PERFORMANCE]
    aiSystemPerformanceProcess();
    HAL_Delay(1000); /* delay 1s */
[/#if]
[#if useAI_VALIDATION]
    aiValidationProcess();
[/#if]
    /* USER CODE BEGIN 4 */
[#if useAI_TEMPLATE]

  int res = -1;
  uint8_t *in_data = NULL;
  uint8_t *out_data = NULL;

  printf("TEMPLATE - run - main loop\r\n");

  /* Skip everything if ai_boostrap() failed and left a NULL handle */
  if (${ModelNameList[0]}) {

    /* The template only handles the single-input/single-output case */
    if ((${ModelNameList[0]}_info.n_inputs != 1) || (${ModelNameList[0]}_info.n_outputs != 1)) {
      ai_error err = {AI_ERROR_INVALID_PARAM, AI_ERROR_CODE_OUT_OF_RANGE};
      ai_log_err(err, "template code should be updated\r\n to support a model with multiple IO");
      return;
    }

    /* 1 - Set the I/O data buffer */

[#if useAI_RELOC]

    const ai_buffer_format fmt_in = AI_BUFFER_FORMAT(&${ModelNameList[0]}_info.inputs[0]);
    const ai_u32 size_in = AI_BUFFER_BYTE_SIZE(AI_BUFFER_SIZE(&${ModelNameList[0]}_info.inputs[0]), fmt_in);

    const ai_buffer_format fmt_out = AI_BUFFER_FORMAT(&${ModelNameList[0]}_info.outputs[0]);
    const ai_u32 size_out = AI_BUFFER_BYTE_SIZE(AI_BUFFER_SIZE(&${ModelNameList[0]}_info.outputs[0]), fmt_out);

    /*  In the case where "--allocate-inputs" option is used, memory buffer can be
     *  used from the activations buffer. This is not mandatory.
     */
    if (${ModelNameList[0]}_info.inputs[0].data)
      in_data = ${ModelNameList[0]}_info.inputs[0].data;
    else {
      /* NOTE(review): never freed - acceptable only because this demo
       * loop runs forever; revisit if the loop becomes re-entrant */
      in_data = malloc(size_in);
    }

    /*  In the case where "--allocate-outputs" option is used, memory buffer can be
     *  used from the activations buffer. This is not mandatory.
     */
    if (${ModelNameList[0]}_info.outputs[0].data)
      out_data = ${ModelNameList[0]}_info.outputs[0].data;
    else {
      out_data = malloc(size_out);
    }

[#else]

#if AI_${NETWORK}_INPUTS_IN_ACTIVATIONS
    in_data = ${ModelNameList[0]}_info.inputs[0].data;
#else
    in_data = in_data_s;
#endif

#if AI_${NETWORK}_OUTPUTS_IN_ACTIVATIONS
    out_data = ${ModelNameList[0]}_info.outputs[0].data;
#else
    out_data = out_data_s;
#endif

[/#if]

    if ((!in_data) || (!out_data)) {
      printf("TEMPLATE - I/O buffers are invalid\r\n");
      return;
    }

    /* 2 - main loop */
    /* Endless best-effort loop: exits only when one step reports an error */
    do {
      /* 1 - acquire and pre-process input data */
      res = acquire_and_process_data(in_data);
      /* 2 - process the data - call inference engine */
      if (res == 0)
        res = ai_run(in_data, out_data);
      /* 3- post-process the predictions */
      if (res == 0)
        res = post_process(out_data);
    } while (res==0);
  }

  if (res) {
    ai_error err = {AI_ERROR_INVALID_STATE, AI_ERROR_CODE_NETWORK};
    ai_log_err(err, "Process has FAILED");
  }
[/#if]
    /* USER CODE END 4 */
}
[#if ModelNameList?size > 1 || useAI_SYSTEM_PERFORMANCE || useAI_VALIDATION]
/* Multiple network support --------------------------------------------------*/

#include <string.h>
#include "ai_datatypes_defines.h"

/* Table of all generated networks, one entry per model.
 * Fix: every ModelNameList reference inside the loop must be indexed
 * with [i] - the pasted version had lost the subscripts (rendering the
 * whole sequence instead of one name), which produces invalid C. */
static const ai_network_entry_t networks[AI_MNETWORK_NUMBER] = {
[#list 0..ModelNameList?size-1 as i]
    [#assign NETWORK = ModelNameList[i]?upper_case]
    {
        .name = (const char *)AI_${NETWORK}_MODEL_NAME,
        .config = AI_${NETWORK}_DATA_CONFIG,
        .ai_get_info = ai_${ModelNameList[i]}_get_info,
        .ai_create = ai_${ModelNameList[i]}_create,
        .ai_destroy = ai_${ModelNameList[i]}_destroy,
        .ai_get_error = ai_${ModelNameList[i]}_get_error,
        .ai_init = ai_${ModelNameList[i]}_init,
        .ai_run = ai_${ModelNameList[i]}_run,
        .ai_forward = ai_${ModelNameList[i]}_forward,
        .ai_data_weights_get_default = ai_${ModelNameList[i]}_data_weights_get,
        .params = { AI_${NETWORK}_DATA_WEIGHTS(0),
                AI_${NETWORK}_DATA_ACTIVATIONS(0)},
        .extActBufferStartAddr = AI_${NETWORK}_DATA_ACTIVATIONS_START_ADDR,
        .actBufferSize = AI_${NETWORK}_DATA_ACTIVATIONS_SIZE
    },
[/#list]
};

/* Bookkeeping for one created multi-network instance: the table entry it
 * was created from, the inner library handle, and its parameters. */
struct network_instance {
     const ai_network_entry_t *entry;
     ai_handle handle;
     ai_network_params params;
};

/* Number of instance is aligned on the number of network */
/* entry == NULL marks a free slot (see ai_mnetwork_handle) */
AI_STATIC struct network_instance gnetworks[AI_MNETWORK_NUMBER] = {0};

/* Check whether `entry` describes the network called `name`.
 * Exact, length-checked string match; a NULL name never matches. */
AI_DECLARE_STATIC
ai_bool ai_mnetwork_is_valid(const char* name,
        const ai_network_entry_t *entry)
{
    if (!name)
        return false;
    const size_t entry_len = strlen(entry->name);
    return (entry_len == strlen(name)) &&
           (strncmp(entry->name, name, entry_len) == 0);
}

/* Dual-purpose slot lookup:
 *  - inst != NULL: validate that `inst` is one of the gnetworks slots
 *    and return it, else NULL;
 *  - inst == NULL: return the first free slot (entry == NULL), else NULL.
 * Fix: the array accesses must be indexed with [i] - the pasted version
 * compared `&gnetworks` (pointer to the whole array) and read
 * `gnetworks.entry`, neither of which compiles. */
AI_DECLARE_STATIC
struct network_instance *ai_mnetwork_handle(struct network_instance *inst)
{
    for (int i = 0; i < AI_MNETWORK_NUMBER; i++) {
        if ((inst) && (&gnetworks[i] == inst))
            return inst;
        else if ((!inst) && (gnetworks[i].entry == NULL))
            return &gnetworks[i];
    }
    return NULL;
}

/* Return `inst`'s slot to the free pool by clearing its entry pointer.
 * Silently ignores pointers that are not gnetworks slots.
 * Fix: index gnetworks with [i] - the pasted version compared the array
 * base and cleared `gnetworks.entry`, which does not compile. */
AI_DECLARE_STATIC
void ai_mnetwork_release_handle(struct network_instance *inst)
{
    for (int i = 0; i < AI_MNETWORK_NUMBER; i++) {
        if ((inst) && (&gnetworks[i] == inst)) {
            gnetworks[i].entry = NULL;
            return;
        }
    }
}

/* Find a network by name, or by position when `name` does not match:
 * returns the name of the `idx`-th entry otherwise, NULL if out of range.
 * Fix: index the networks table with [i] - the pasted version assigned
 * `entry = &networks;` (pointer to the whole array), which does not
 * compile and would never advance through the table. */
AI_API_ENTRY
const char* ai_mnetwork_find(const char *name, ai_int idx)
{
    const ai_network_entry_t *entry;

    for (int i = 0; i < AI_MNETWORK_NUMBER; i++) {
        entry = &networks[i];
        if (ai_mnetwork_is_valid(name, entry))
            return entry->name;
        else {
            if (!idx--)
                return entry->name;
        }
    }
    return NULL;
}

/* Create an instance of the network called `name`.
 * network        : out - receives an opaque handle wrapping the instance
 * network_config : optional creation config (NULL -> table default)
 * Returns AI_ERROR_NONE on success; ALLOCATION_FAILED when no free slot,
 * INVALID_PARAM when the name is unknown.
 * Fix: index the networks table with [i] in the lookup loop - the pasted
 * version assigned `entry = &networks;`, which does not compile. */
AI_API_ENTRY
ai_error ai_mnetwork_create(const char *name, ai_handle* network,
        const ai_buffer* network_config)
{
    const ai_network_entry_t *entry;
    const ai_network_entry_t *found = NULL;
    ai_error err;
    struct network_instance *inst = ai_mnetwork_handle(NULL);

    if (!inst) {
        err.type = AI_ERROR_ALLOCATION_FAILED;
        err.code = AI_ERROR_CODE_NETWORK;
        return err;
    }

    for (int i = 0; i < AI_MNETWORK_NUMBER; i++) {
        entry = &networks[i];
        if (ai_mnetwork_is_valid(name, entry)) {
            found = entry;
            break;
        }
    }

    if (!found) {
        err.type = AI_ERROR_INVALID_PARAM;
        err.code = AI_ERROR_CODE_NETWORK;
        return err;
    }

    if (network_config == NULL)
        err = found->ai_create(network, found->config);
    else
        err = found->ai_create(network, network_config);
    if ((err.code == AI_ERROR_CODE_NONE) && (err.type == AI_ERROR_NONE)) {
        /* On success, the caller's handle is swapped for the wrapper slot */
        inst->entry = found;
        inst->handle = *network;
        *network = (ai_handle*)inst;
    }

    return err;
}

/* Destroy a multi-network instance and free its slot.
 * Returns AI_HANDLE_NULL on success, the original handle otherwise. */
AI_API_ENTRY
ai_handle ai_mnetwork_destroy(ai_handle network)
{
    struct network_instance *inst =
        ai_mnetwork_handle((struct network_instance *)network);

    if (inst) {
        ai_handle inner = inst->entry->ai_destroy(inst->handle);
        if (inner != inst->handle) {
            ai_mnetwork_release_handle(inst);
            network = AI_HANDLE_NULL;
        }
    }
    return network;
}

/* Fill `report` with the info of the wrapped network.
 * Returns false when `network` is not a valid multi-network handle. */
AI_API_ENTRY
ai_bool ai_mnetwork_get_info(ai_handle network, ai_network_report* report)
{
    struct network_instance *inst =
        ai_mnetwork_handle((struct network_instance *)network);
    return inst ? inst->entry->ai_get_info(inst->handle, report) : false;
}

/* Return the last error of the wrapped network, or an
 * INVALID_PARAM/NETWORK error when the handle is not valid. */
AI_API_ENTRY
ai_error ai_mnetwork_get_error(ai_handle network)
{
    struct network_instance *inst =
        ai_mnetwork_handle((struct network_instance *)network);

    if (inst)
        return inst->entry->ai_get_error(inst->handle);

    ai_error err;
    err.type = AI_ERROR_INVALID_PARAM;
    err.code = AI_ERROR_CODE_NETWORK;
    return err;
}

/* Initialize the wrapped network, merging caller parameters with the
 * table defaults: a caller buffer with n_batches != 0 replaces the
 * default descriptor entirely; otherwise only the activations data
 * pointer is taken from the caller and the weights fall back to the
 * generated default getter.
 * Returns false when `network` is not a valid multi-network handle. */
AI_API_ENTRY
ai_bool ai_mnetwork_init(ai_handle network, const ai_network_params* params)
{
    struct network_instance *inn;
    ai_network_params par;

    /* TODO: adding check ai_buffer activations/weights shape coherence */

    inn =  ai_mnetwork_handle((struct network_instance *)network);
    if (inn) {
        /* Start from the table defaults, then apply caller overrides */
        par = inn->entry->params;
        if (params->activations.n_batches)
            par.activations = params->activations;
        else
            par.activations.data = params->activations.data;
        if (params->params.n_batches)
            par.params = params->params;
        else
            par.params.data = inn->entry->ai_data_weights_get_default();
        return inn->entry->ai_init(inn->handle, &par);
    }
    else
        return false;
}

/* Run one inference on the wrapped network.
 * Returns the batch count from the inner run, or 0 on invalid handle. */
AI_API_ENTRY
ai_i32 ai_mnetwork_run(ai_handle network, const ai_buffer* input,
        ai_buffer* output)
{
    struct network_instance *inst =
        ai_mnetwork_handle((struct network_instance *)network);
    return inst ? inst->entry->ai_run(inst->handle, input, output) : 0;
}

/* Run the forward pass only on the wrapped network.
 * Returns the inner result, or 0 on invalid handle. */
AI_API_ENTRY
ai_i32 ai_mnetwork_forward(ai_handle network, const ai_buffer* input)
{
    struct network_instance *inst =
        ai_mnetwork_handle((struct network_instance *)network);
    return inst ? inst->entry->ai_forward(inst->handle, input) : 0;
}

/* Expose the inner library handle and parameters of a wrapped instance.
 * Returns 0 on success, -1 on invalid handle or NULL out-pointers. */
AI_API_ENTRY
int ai_mnetwork_get_private_handle(ai_handle network,
         ai_handle *phandle,
         ai_network_params *pparams)
{
     struct network_instance *inst =
         ai_mnetwork_handle((struct network_instance *)network);

     if (!inst || !phandle || !pparams)
         return -1;

     *phandle = inst->handle;
     *pparams = inst->params;
     return 0;
}


/* Report the external activations buffer location and size declared in
 * the table entry of a wrapped instance.
 * Returns 0 on success, -1 on invalid handle or NULL out-pointers. */
AI_API_ENTRY
int ai_mnetwork_get_ext_data_activations(ai_handle network,
         ai_u32 *add,
         ai_u32 *size)
{
     struct network_instance *inst =
         ai_mnetwork_handle((struct network_instance *)network);

     if (!inst || !add || !size)
         return -1;

     *add = inst->entry->extActBufferStartAddr;
     *size = inst->entry->actBufferSize;
     return 0;
}


[/#if]
#ifdef __cplusplus
}
#endif

所属标签

相似问题

关于
我们是谁
投资者关系
意法半导体可持续发展举措
创新与技术
意法半导体官网
联系我们
联系ST分支机构
寻找销售人员和分销渠道
社区
媒体中心
活动与培训
隐私策略
隐私策略
Cookies管理
行使您的权利
官方最新发布
STM32Cube扩展软件包
意法半导体边缘AI套件
ST - 理想汽车豪华SUV案例
ST意法半导体智能家居案例
STM32 ARM Cortex 32位微控制器
关注我们
st-img 微信公众号
st-img 手机版