Mirror of https://github.com/bytecodealliance/wasm-micro-runtime.git
fix: clang format convention
commit bb2c963a34
parent a601990e13
@@ -419,8 +419,9 @@ wasm_native_init()
 
 #if WASM_ENABLE_WASI_NN != 0
     n_native_symbols = get_wasi_nn_export_apis(&native_symbols);
-    if (!wasm_native_register_natives("env", native_symbols, // TODO: check env or wasi_nn tag
-                                      n_native_symbols))
+    if (!wasm_native_register_natives(
+            "env", native_symbols, // TODO: check env or wasi_nn tag
+            n_native_symbols))
         return false;
 #endif
 
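Context for the hunk above: get_wasi_nn_export_apis() fills a NativeSymbol array, and the call registers it under the "env" import namespace (the TODO asks whether a "wasi_nn" tag would be more appropriate). The embedder-facing twin of this internal call is wasm_runtime_register_natives(). A minimal sketch of that registration pattern, using a hypothetical host function test_wrapper and WAMR's "(i)i" signature convention; not part of this commit:

#include "wasm_export.h"

/* Hypothetical host function: one i32 in, one i32 out. Every WAMR native
   receives the execution environment as its first parameter. */
static int32_t
test_wrapper(wasm_exec_env_t exec_env, int32_t x)
{
    return x + 1;
}

static NativeSymbol g_test_symbols[] = {
    /* guest-visible name, host function, signature, attachment */
    { "test", (void *)test_wrapper, "(i)i", NULL },
};

static bool
register_test_natives(void)
{
    /* Must run before the module is instantiated; "env" is the import
       namespace the guest links against. */
    return wasm_runtime_register_natives("env", g_test_symbols,
                                         sizeof(g_test_symbols)
                                             / sizeof(NativeSymbol));
}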
@@ -3,39 +3,43 @@
 #include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
 #include <tensorflow/lite/model.h>
 #include <tensorflow/lite/optional_debug_tools.h>
 #include <tensorflow/lite/error_reporter.h>
 
-enum Idx {GRAPH=0, GRAPH_SIZE=1 };
+enum Idx { GRAPH = 0, GRAPH_SIZE = 1 };
 
 std::unique_ptr<tflite::Interpreter> interpreter = NULL;
 std::unique_ptr<tflite::FlatBufferModel> model = NULL;
 
-uint32_t _load(graph_builder_array builder, graph_encoding encoding) {
+uint32_t
+_load(graph_builder_array builder, graph_encoding encoding)
+{
+    if (encoding != tensorflow) {
+        return invalid_argument;
+    }
 
-    if(encoding!=tensorflow){return invalid_argument;}
-
-    uint32_t *size = (uint32_t*) builder[Idx::GRAPH_SIZE];
-
-    tflite::ErrorReporter *error_reporter;
-
-    model = tflite::FlatBufferModel::BuildFromBuffer((const char *)builder[Idx::GRAPH], *size, error_reporter);
-
-    if(model== nullptr){
-        printf("failure: null model \n");
-        return invalid_argument;
-    }
+    uint32_t *size = (uint32_t *)builder[Idx::GRAPH_SIZE];
+
+    tflite::ErrorReporter *error_reporter;
+
+    model = tflite::FlatBufferModel::BuildFromBuffer(
+        (const char *)builder[Idx::GRAPH], *size, error_reporter);
+
+    if (model == nullptr) {
+        printf("failure: null model \n");
+        return invalid_argument;
+    }
 
     // Build the interpreter with the InterpreterBuilder.
     tflite::ops::builtin::BuiltinOpResolver resolver;
     tflite::InterpreterBuilder tflite_builder(*model, resolver);
     tflite_builder(&interpreter);
-    if(interpreter==nullptr){
-        printf("failure: null interpreter \n");
+    if (interpreter == nullptr) {
+        printf("failure: null interpreter \n");
         return invalid_argument;
     }
 
     return success;
 }
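One thing the reformatting makes easier to spot: error_reporter is handed to BuildFromBuffer() without ever being initialized. That behaviour predates this commit and is untouched by it. A minimal sketch of the safer call, assuming TFLite's stock reporter from tensorflow/lite/stderr_reporter.h:

#include <cstddef>
#include <memory>
#include <tensorflow/lite/model.h>
#include <tensorflow/lite/stderr_reporter.h>

/* Builds the FlatBuffer model with the default stderr-backed reporter
   instead of an uninitialized ErrorReporter pointer. */
static std::unique_ptr<tflite::FlatBufferModel>
build_model(const char *buf, size_t buf_size)
{
    return tflite::FlatBufferModel::BuildFromBuffer(
        buf, buf_size, tflite::DefaultErrorReporter());
}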
@@ -9,7 +9,8 @@
 extern "C" {
 #endif
 
-uint32_t _load(graph_builder_array builder, graph_encoding encoding);
+uint32_t
+_load(graph_builder_array builder, graph_encoding encoding);
 
 #ifdef __cplusplus
 }
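For readers skimming this hunk: _load() is implemented in the C++ TensorFlow Lite translation unit but called from the C side of the library, so its declaration must sit inside the usual extern "C" guard to keep the symbol unmangled. The full guard pattern looks like the sketch below; the opening #ifdef and closing #endif fall outside the hunk, and graph_builder_array / graph_encoding are the wasi-nn types declared elsewhere in this patch:

#include <stdint.h>

#ifdef __cplusplus
extern "C" {
#endif

/* Unmangled so the C glue code can link against the C++ implementation. */
uint32_t
_load(graph_builder_array builder, graph_encoding encoding);

#ifdef __cplusplus
}
#endif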
@@ -11,21 +11,26 @@
 
 /**
  * @brief loader of tensorflow
  *
  * @param builder array of 2 pointers: first its the buffer, second its the size
  */
-void load_tensorflow(wasm_module_inst_t instance, graph_builder_array builder) {
+void
+load_tensorflow(wasm_module_inst_t instance, graph_builder_array builder)
+{
     printf("Loading tensorflow...\n");
     for (int i = 0; i < 2; ++i)
-        builder[i] = (graph_builder) wasm_runtime_addr_app_to_native(instance, builder[i]);
+        builder[i] = (graph_builder)wasm_runtime_addr_app_to_native(instance,
+                                                                    builder[i]);
 }
 
-uint32_t wasi_nn_load(wasm_exec_env_t exec_env, uint32_t builder, uint32_t encoding)
+uint32_t
+wasi_nn_load(wasm_exec_env_t exec_env, uint32_t builder, uint32_t encoding)
 {
     printf("Inside wasi_nn_load!\n\n");
     wasm_module_inst_t instance = wasm_runtime_get_module_inst(exec_env);
-    graph_builder_array buf = (graph_builder_array) wasm_runtime_addr_app_to_native(instance, builder);
-    switch ((graph_encoding) encoding) {
+    graph_builder_array buf =
+        (graph_builder_array)wasm_runtime_addr_app_to_native(instance, builder);
+    switch ((graph_encoding)encoding) {
         case openvino:
             return invalid_argument;
         case tensorflow:
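load_tensorflow() above converts the two guest offsets to native pointers with wasm_runtime_addr_app_to_native() but never bounds-checks them. A minimal guarded variant, assuming the wasm_runtime_validate_app_addr() companion from wasm_export.h (the offset and size parameter types differ between uint32_t and uint64_t across WAMR releases); illustrative only, not part of this commit:

#include <stddef.h>
#include "wasm_export.h"

/* Translates a guest (app) offset into a native pointer, returning NULL
   when [app_offset, app_offset + size) lies outside the instance's memory. */
static void *
checked_addr_app_to_native(wasm_module_inst_t instance, uint32_t app_offset,
                           uint32_t size)
{
    if (!wasm_runtime_validate_app_addr(instance, app_offset, size))
        return NULL;
    return wasm_runtime_addr_app_to_native(instance, app_offset);
}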
@@ -34,28 +39,26 @@ uint32_t wasi_nn_load(wasm_exec_env_t exec_env, uint32_t builder, uint32_t encoding)
         case onnx:
             return invalid_argument;
     }
-    return _load(buf, (graph_encoding) encoding);
+    return _load(buf, (graph_encoding)encoding);
 }
 
-void wasi_nn_init_execution_context()
-{
-
-}
+void
+wasi_nn_init_execution_context()
+{}
 
-void wasi_nn_set_input()
+void
+wasi_nn_set_input()
 {
     // interpreter->AllocateTensors();
 }
 
-void wasi_nn_compute()
-{
-
-}
+void
+wasi_nn_compute()
+{}
 
-void wasi_nn_get_output()
-{
-
-}
+void
+wasi_nn_get_output()
+{}
 
 /* clang-format off */
 #define REG_NATIVE_FUNC(func_name, signature) \
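wasi_nn_set_input(), wasi_nn_compute() and wasi_nn_get_output() are still stubs here; only the commented-out AllocateTensors() hints at what comes next. A minimal sketch of the TensorFlow Lite interpreter flow those stubs would eventually wrap, assuming a model with a single float input and a single float output tensor; illustrative only, not part of this commit:

#include <cstring>
#include <tensorflow/lite/interpreter.h>

/* Typical single-inference flow against an already built interpreter:
   allocate tensor buffers, copy the input in, run, copy the output out. */
static bool
run_inference(tflite::Interpreter *interp, const float *input, size_t in_len,
              float *output, size_t out_len)
{
    if (interp->AllocateTensors() != kTfLiteOk)
        return false;
    std::memcpy(interp->typed_input_tensor<float>(0), input,
                in_len * sizeof(float));
    if (interp->Invoke() != kTfLiteOk)
        return false;
    std::memcpy(output, interp->typed_output_tensor<float>(0),
                out_len * sizeof(float));
    return true;
}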
@@ -10,23 +10,13 @@
 
 typedef uint32_t buffer_size;
 
-typedef enum {
-    success = 0,
-    invalid_argument,
-    missing_memory,
-    busy
-} nn_erno;
+typedef enum { success = 0, invalid_argument, missing_memory, busy } nn_erno;
 
-typedef uint32_t * tensor_dimensions;
+typedef uint32_t *tensor_dimensions;
 
-typedef enum {
-    f16 = 0,
-    f32,
-    u8,
-    i32
-} tensor_type;
+typedef enum { f16 = 0, f32, u8, i32 } tensor_type;
 
-typedef uint8_t* tensor_data;
+typedef uint8_t *tensor_data;
 
 typedef struct {
     tensor_dimensions dimensions;
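The churn in this hunk (short enums collapsed onto one line, the * hugging the name, return types on their own line) is what a handful of clang-format options produce. A hedged guess at the relevant knobs; the project's actual .clang-format may differ:

# Hypothetical .clang-format fragment reproducing the style seen above
AllowShortEnumsOnASingleLine: true   # typedef enum { ... } nn_erno; fits on one line
PointerAlignment: Right              # uint8_t *tensor_data rather than uint8_t* tensor_data
AlwaysBreakAfterReturnType: All      # return type on its own line, declarations included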
@@ -34,30 +24,27 @@ typedef struct {
     tensor_data data;
 } tensor;
 
-typedef uint8_t * graph_builder;
+typedef uint8_t *graph_builder;
 
-typedef graph_builder * graph_builder_array;
+typedef graph_builder *graph_builder_array;
 
-typedef enum {
-    openvino = 0,
-    tensorflow,
-    onnx
-} graph_encoding;
+typedef enum { openvino = 0, tensorflow, onnx } graph_encoding;
 
-typedef enum {
-    cpu = 0,
-    gpu,
-    tpu
-} execution_target;
+typedef enum { cpu = 0, gpu, tpu } execution_target;
 
-uint32_t load(graph_builder_array builder, graph_encoding encoding);
+uint32_t
+load(graph_builder_array builder, graph_encoding encoding);
 
-void init_execution_context();
+void
+init_execution_context();
 
-void set_input();
+void
+set_input();
 
-void compute();
+void
+compute();
 
-void get_output();
+void
+get_output();
 
 #endif
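The builder protocol implied by these types, by Idx::GRAPH / Idx::GRAPH_SIZE in the TensorFlow Lite file, and by the doc comment on load_tensorflow() is: element 0 of the graph_builder_array points at the serialized model, element 1 at a uint32_t holding its size. A small host-side sketch with a hypothetical helper name, not part of this commit:

#include <stdint.h>

typedef uint8_t *graph_builder;
typedef graph_builder *graph_builder_array;

/* Unpacks the two-element builder array consumed by _load(): slot 0 is the
   serialized model, slot 1 points at a uint32_t with the buffer size. */
static const char *
builder_model_bytes(graph_builder_array builder, uint32_t *out_size)
{
    *out_size = *(uint32_t *)builder[1]; /* Idx::GRAPH_SIZE */
    return (const char *)builder[0];     /* Idx::GRAPH */
}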
@@ -25,7 +25,7 @@ generate_float(int iteration, double seed1, float seed2)
     char *buf = strdup("test_message");
     uint32_t *size = malloc(sizeof(uint32_t));
     *size = 4096;
-    graph_builder_array arr[] = {(graph_builder)buf, (graph_builder)size};
+    graph_builder_array arr[] = { (graph_builder)buf, (graph_builder)size };
 
     load(arr, 1);
     float ret;