fix: clang format convention

Ahmedounet 2022-06-13 17:54:45 +02:00
parent a601990e13
commit bb2c963a34
6 changed files with 73 additions and 77 deletions
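The commit does not spell out the convention it applies, but the reformatted lines below are consistent with a clang-format setup roughly like the following sketch. This is inferred from the visible changes, not necessarily the repository's actual .clang-format; the base style and exact values are assumptions.

# Sketch only: options inferred from the formatting visible in this commit
BasedOnStyle: LLVM                  # assumption; any base with the overrides below fits
IndentWidth: 4
ColumnLimit: 80                     # long calls such as BuildFromBuffer(...) get wrapped
AlwaysBreakAfterReturnType: All     # return type on its own line, for definitions and declarations
BreakBeforeBraces: Custom
BraceWrapping:
  AfterFunction: true               # function body brace on its own line
PointerAlignment: Right             # uint8_t *tensor_data, not uint8_t* tensor_data
Cpp11BracedListStyle: false         # spaces inside braces: { (graph_builder)buf, (graph_builder)size }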

View File

@@ -419,8 +419,9 @@ wasm_native_init()
#if WASM_ENABLE_WASI_NN != 0
n_native_symbols = get_wasi_nn_export_apis(&native_symbols);
if (!wasm_native_register_natives("env", native_symbols, // TODO: check env or wasi_nn tag
n_native_symbols))
if (!wasm_native_register_natives(
"env", native_symbols, // TODO: check env or wasi_nn tag
n_native_symbols))
return false;
#endif

View File

@@ -3,39 +3,43 @@
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>
#include <tensorflow/lite/optional_debug_tools.h>
#include <tensorflow/lite/error_reporter.h>
enum Idx {GRAPH=0, GRAPH_SIZE=1 };
#include <tensorflow/lite/optional_debug_tools.h>
#include <tensorflow/lite/error_reporter.h>
enum Idx { GRAPH = 0, GRAPH_SIZE = 1 };
std::unique_ptr<tflite::Interpreter> interpreter = NULL;
std::unique_ptr<tflite::FlatBufferModel> model = NULL;
std::unique_ptr<tflite::FlatBufferModel> model = NULL;
uint32_t _load(graph_builder_array builder, graph_encoding encoding) {
uint32_t
_load(graph_builder_array builder, graph_encoding encoding)
{
if(encoding!=tensorflow){return invalid_argument;}
uint32_t *size = (uint32_t*) builder[Idx::GRAPH_SIZE];
tflite::ErrorReporter *error_reporter;
model = tflite::FlatBufferModel::BuildFromBuffer((const char *)builder[Idx::GRAPH], *size, error_reporter);
if(model== nullptr){
printf("failure: null model \n");
if (encoding != tensorflow) {
return invalid_argument;
}
}
// Build the interpreter with the InterpreterBuilder.
uint32_t *size = (uint32_t *)builder[Idx::GRAPH_SIZE];
tflite::ErrorReporter *error_reporter;
model = tflite::FlatBufferModel::BuildFromBuffer(
(const char *)builder[Idx::GRAPH], *size, error_reporter);
if (model == nullptr) {
printf("failure: null model \n");
return invalid_argument;
}
// Build the interpreter with the InterpreterBuilder.
tflite::ops::builtin::BuiltinOpResolver resolver;
tflite::InterpreterBuilder tflite_builder(*model, resolver);
tflite::InterpreterBuilder tflite_builder(*model, resolver);
tflite_builder(&interpreter);
if(interpreter==nullptr){
if (interpreter == nullptr) {
printf("failure: null interpreter \n");
return invalid_argument;
}
}
return success;
}

View File

@@ -9,7 +9,8 @@
extern "C" {
#endif
uint32_t _load(graph_builder_array builder, graph_encoding encoding);
uint32_t
_load(graph_builder_array builder, graph_encoding encoding);
#ifdef __cplusplus
}

View File

@@ -11,21 +11,26 @@
/**
* @brief loader of tensorflow
*
* @param builder array of 2 pointers: first is the buffer, second is the size
*
* @param builder array of 2 pointers: first is the buffer, second is the size
*/
void load_tensorflow(wasm_module_inst_t instance, graph_builder_array builder) {
void
load_tensorflow(wasm_module_inst_t instance, graph_builder_array builder)
{
printf("Loading tensorflow...\n");
for (int i = 0; i < 2; ++i)
builder[i] = (graph_builder) wasm_runtime_addr_app_to_native(instance, builder[i]);
builder[i] = (graph_builder)wasm_runtime_addr_app_to_native(instance,
builder[i]);
}
uint32_t wasi_nn_load(wasm_exec_env_t exec_env, uint32_t builder, uint32_t encoding)
uint32_t
wasi_nn_load(wasm_exec_env_t exec_env, uint32_t builder, uint32_t encoding)
{
printf("Inside wasi_nn_load!\n\n");
wasm_module_inst_t instance = wasm_runtime_get_module_inst(exec_env);
graph_builder_array buf = (graph_builder_array) wasm_runtime_addr_app_to_native(instance, builder);
switch ((graph_encoding) encoding) {
graph_builder_array buf =
(graph_builder_array)wasm_runtime_addr_app_to_native(instance, builder);
switch ((graph_encoding)encoding) {
case openvino:
return invalid_argument;
case tensorflow:
@@ -34,28 +39,26 @@ uint32_t wasi_nn_load(wasm_exec_env_t exec_env, uint32_t builder, uint32_t encod
case onnx:
return invalid_argument;
}
return _load(buf, (graph_encoding) encoding);
return _load(buf, (graph_encoding)encoding);
}
void wasi_nn_init_execution_context()
{
void
wasi_nn_init_execution_context()
{}
}
void wasi_nn_set_input()
void
wasi_nn_set_input()
{
// interpreter->AllocateTensors();
}
void wasi_nn_compute()
{
void
wasi_nn_compute()
{}
}
void wasi_nn_get_output()
{
}
void
wasi_nn_get_output()
{}
/* clang-format off */
#define REG_NATIVE_FUNC(func_name, signature) \
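The REG_NATIVE_FUNC macro above is cut off by the end of the hunk; it is what populates the NativeSymbol table that the first file registers through wasm_native_register_natives("env", native_symbols, n_native_symbols). As a rough, hypothetical sketch of how that plumbing commonly looks in WAMR native libraries (the macro body, signature strings, and table contents of this commit may differ):

/* Hypothetical sketch; the actual macro body is not visible in this diff. */
#include "lib_export.h"  /* NativeSymbol */
#include "wasm_export.h" /* wasm_exec_env_t */

uint32_t
wasi_nn_load(wasm_exec_env_t exec_env, uint32_t builder, uint32_t encoding);

/* Map the guest-visible name to its host wrapper plus a WAMR signature
   string; "(ii)i" stands for two i32 parameters and an i32 result. */
#define REG_NATIVE_FUNC(func_name, signature) \
    { #func_name, (void *)wasi_nn_##func_name, signature, NULL }

static NativeSymbol native_symbols_wasi_nn[] = {
    REG_NATIVE_FUNC(load, "(ii)i"), /* guest calls load(), host runs wasi_nn_load() */
};

uint32_t
get_wasi_nn_export_apis(NativeSymbol **p_native_symbols)
{
    *p_native_symbols = native_symbols_wasi_nn;
    return sizeof(native_symbols_wasi_nn) / sizeof(NativeSymbol);
}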

View File

@@ -10,23 +10,13 @@
typedef uint32_t buffer_size;
typedef enum {
success = 0,
invalid_argument,
missing_memory,
busy
} nn_erno;
typedef enum { success = 0, invalid_argument, missing_memory, busy } nn_erno;
typedef uint32_t * tensor_dimensions;
typedef uint32_t *tensor_dimensions;
typedef enum {
f16 = 0,
f32,
u8,
i32
} tensor_type;
typedef enum { f16 = 0, f32, u8, i32 } tensor_type;
typedef uint8_t* tensor_data;
typedef uint8_t *tensor_data;
typedef struct {
tensor_dimensions dimensions;
@@ -34,30 +24,27 @@ typedef struct {
tensor_data data;
} tensor;
typedef uint8_t * graph_builder;
typedef uint8_t *graph_builder;
typedef graph_builder * graph_builder_array;
typedef graph_builder *graph_builder_array;
typedef enum {
openvino = 0,
tensorflow,
onnx
} graph_encoding;
typedef enum { openvino = 0, tensorflow, onnx } graph_encoding;
typedef enum {
cpu = 0,
gpu,
tpu
} execution_target;
typedef enum { cpu = 0, gpu, tpu } execution_target;
uint32_t load(graph_builder_array builder, graph_encoding encoding);
uint32_t
load(graph_builder_array builder, graph_encoding encoding);
void init_execution_context();
void
init_execution_context();
void set_input();
void
set_input();
void compute();
void
compute();
void get_output();
void
get_output();
#endif
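For orientation, the call order this header anticipates follows wasi-nn: build the two-entry graph_builder_array, load it, then drive inference through the remaining calls, which are still parameterless stubs at this point. A minimal guest-side sketch against the declarations above (the header file name is assumed):

#include <stdint.h>
#include "wasi_nn.h" /* the header shown above; name assumed */

int
run_inference(uint8_t *model_buf, uint32_t model_size)
{
    /* builder[0] points at the model bytes, builder[1] at the size,
       matching the load_tensorflow() comment earlier in this commit */
    graph_builder arr[2] = { (graph_builder)model_buf,
                             (graph_builder)&model_size };

    if (load(arr, tensorflow) != success)
        return -1;

    /* the rest of the API is still a stub in this prototype */
    init_execution_context();
    set_input();
    compute();
    get_output();
    return 0;
}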

View File

@@ -25,7 +25,7 @@ generate_float(int iteration, double seed1, float seed2)
char *buf = strdup("test_message");
uint32_t *size = malloc(sizeof(uint32_t));
*size = 4096;
graph_builder_array arr[] = {(graph_builder)buf, (graph_builder)size};
graph_builder_array arr[] = { (graph_builder)buf, (graph_builder)size };
load(arr, 1);
float ret;