Update wasi nn definition

This commit is contained in:
tonibofarull 2022-06-12 12:50:34 +02:00
parent 712ffdb044
commit cb15b44174
3 changed files with 87 additions and 65 deletions

View File

@@ -5,64 +5,61 @@
#include <string.h>
#include <stdlib.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>
#include <tensorflow/lite/optional_debug_tools.h>
#include "wasm_export.h"
std::unique_ptr<tflite::Interpreter> interpreter = NULL;
std::unique_ptr<tflite::FlatBufferModel> model =NULL;
// #include <tensorflow/lite/interpreter.h>
// #include <tensorflow/lite/kernels/register.h>
// #include <tensorflow/lite/model.h>
// #include <tensorflow/lite/optional_debug_tools.h>
º
void load(graph_builder_array builder, graph_encoding encoding)
// std::unique_ptr<tflite::Interpreter> interpreter = NULL;
// std::unique_ptr<tflite::FlatBufferModel> model = NULL;
void wasi_nn_load(wasm_exec_env_t exec_env, graph_builder_array builder, graph_encoding encoding)
{
model = tflite::FlatBufferModel::BuildFromBuffer(builder[0]);
tflite::ops::builtin::BuiltinOpResolver resolver;
tflite::InterpreterBuilder builder(*model, resolver);
builder(&interpreter);
// tflite::ErrorReporter *error_reporter;
// model = tflite::FlatBufferModel::BuildFromBuffer(
// (const char *)builder[0],
// 1000, // TODO: find how to pass buffer size
// error_reporter
// );
// tflite::ops::builtin::BuiltinOpResolver resolver;
// tflite::InterpreterBuilder(*model, resolver)(&interpreter);
printf("Inside wasi_nn_load!\n\n");
}
void init_execution_context()
void wasi_nn_init_execution_context()
{
}
//run inference functions
void set_input()
void wasi_nn_set_input()
{
interpreter->AllocateTensors() ;
// interpreter->AllocateTensors();
}
void compute (graph_execution_context context )
{
return
}
void get_output()
void wasi_nn_compute()
{
}
/*
void wasi_nn_get_output()
{
}
/* clang-format off */
#define REG_NATIVE_FUNC(func_name, signature) \
{ #func_name, wasi_nn_##func_name, signature, NULL }
/* clang-format on */
static NativeSymbol native_symbols_wasi_nn[] = {
REG_NATIVE_FUNC(args_get, "(**)i"),
REG_NATIVE_FUNC(args_sizes_get, "(**)i"),
REG_NATIVE_FUNC(clock_res_get, "(i*)i"),
REG_NATIVE_FUNC(clock_time_get, "(iI*)i"),
REG_NATIVE_FUNC(environ_get, "(**)i"),
REG_NATIVE_FUNC(environ_sizes_get, "(**)i"),
REG_NATIVE_FUNC(load, "(ii)"),
};
*/
uint32
uint32_t
get_wasi_nn_export_apis(NativeSymbol **p_libc_wasi_apis)
{
*p_libc_wasi_apis = native_symbols_libc_wasi;
return sizeof(native_symbols_libc_wasi) / sizeof(NativeSymbol);
*p_libc_wasi_apis = native_symbols_wasi_nn;
return sizeof(native_symbols_wasi_nn) / sizeof(NativeSymbol);
}

View File

@@ -1,9 +1,7 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
set (WASI_NN_DIR ${CMAKE_CURRENT_LIST_DIR})
cmake_minimum_required (VERSION 2.8...3.16)
add_definitions (-DWASM_ENABLE_WASI_NN=1)
project(wasi_nn)
file (GLOB_RECURSE source_all ${WASI_NN_DIR}/*.c)
add_library(${PROJECT_NAME} STATIC ${CMAKE_CURRENT_LIST_DIR}/wasi_nn.c)
target_include_directories(${PROJECT_NAME} PUBLIC ${CMAKE_CURRENT_LIST_DIR})
set (LIBC_WASI_NN_SOURCE ${source_all})

View File

@@ -1,36 +1,63 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef WASI_NN_H
#define WASI_NN_H
#ifndef _WASI_SOCKET_EXT_H_
#define _WASI_SOCKET_EXT_H_
#include <stddef.h>
#include <stdint.h>
/**
* Following definition from:
* https://github.com/WebAssembly/wasi-nn/blob/c557b2e9f84b6630f13b3185b43607f0388343b2/phases/ephemeral/witx/wasi_ephemeral_nn.witx
*/
typedef uint32_t buffer_size;
typedef enum:uint16_t {sucess, invalid_argument, missing_memory, busy } nn_erno;
typedef enum {
suces = 0,
invalid_argument,
missing_memory,
busy
} nn_erno;
typedef uint32_t * tensor_dimensions;
typedef enum:uint8_t {f16, f32, u8, i32};
typedef enum {
f16 = 0,
f32,
u8,
i32
} tensor_type;
typedef uint8_t* tensor_data;
typedef struct {
tensor_dimensions dimensions;
tensor_type type;
tensor_data data;
} tensor;
typedef uint8_t * graph_builder;
typedef graph_builder * graph_builder_array;
typedef handle graph;
typedef enum {
openvino = 0,
tensorflow,
onnx
} graph_encoding;
typedef enum {openvino=0, tensorflow,onnx } graph_encoding;
typedef enum {
cpu = 0,
gpu,
tpu
} execution_target;
void load(graph_builder_array builder, graph_encoding encoding);
void init_execution_context();
void set_input();
void compute();
void get_output();
#endif