wasi-nn: retire is_model_loaded flag (#4613)

This flag doesn't make much sense anymore because:
- backends validate the given graph/ctx by themselves
- some of them support loading multiple models for a single context
YAMAMOTO Takashi 2025-09-14 15:01:55 +09:00 committed by GitHub
parent ad21524573
commit 4f86468670
2 changed files with 0 additions and 27 deletions
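
The commit message's first point is the key one: each backend already checks the graph/ctx handles it is handed. Below is a minimal, self-contained sketch of that idea. It is not the actual WAMR backend code; every name in it (HypotheticalBackendCtx, hypothetical_init_execution_context, MAX_GRAPHS, the simplified type stand-ins) is made up for illustration. Once the backend rejects an unknown graph handle itself, a frontend-wide is_model_loaded boolean adds nothing, and with several models loaded per context it could not describe the state anyway.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the wasi-nn types seen in the diff below; the real
 * definitions live in the wasi-nn headers. */
typedef uint32_t graph;
typedef uint32_t graph_execution_context;
typedef enum { success = 0, runtime_error } wasi_nn_error;

#define MAX_GRAPHS 8 /* arbitrary limit for this sketch */

/* A backend context that may own several loaded models at once. */
typedef struct {
    void *models[MAX_GRAPHS];
    uint32_t model_count;
} HypotheticalBackendCtx;

/* The backend validates the graph handle against its own bookkeeping,
 * so the wasi-nn frontend does not need a separate "model loaded" flag. */
static wasi_nn_error
hypothetical_init_execution_context(HypotheticalBackendCtx *be, graph g,
                                    graph_execution_context *ctx)
{
    if (g >= be->model_count || be->models[g] == NULL) {
        fprintf(stderr, "graph %u was never loaded\n", (unsigned)g);
        return runtime_error;
    }
    *ctx = g; /* one execution context per graph in this sketch */
    return success;
}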

View File

@@ -66,7 +66,6 @@ wasi_nn_ctx_destroy(WASINNContext *wasi_nn_ctx)
}
NN_DBG_PRINTF("[WASI NN] DEINIT...");
NN_DBG_PRINTF("Freeing wasi-nn");
NN_DBG_PRINTF("-> is_model_loaded: %d", wasi_nn_ctx->is_model_loaded);
NN_DBG_PRINTF("-> current_encoding: %d", wasi_nn_ctx->backend);
bh_assert(!wasi_nn_ctx->busy);
@@ -202,15 +201,6 @@ wasi_nn_destroy()
}
/* Utils */
-static wasi_nn_error
-is_model_initialized(WASINNContext *wasi_nn_ctx)
-{
-if (!wasi_nn_ctx->is_model_loaded) {
-NN_ERR_PRINTF("Model not initialized.");
-return runtime_error;
-}
-return success;
-}
/*
*TODO: choose a proper backend based on
@@ -510,8 +500,6 @@ wasi_nn_load(wasm_exec_env_t exec_env, graph_builder_array_wasm *builder,
if (res != success)
goto fail;
-wasi_nn_ctx->is_model_loaded = true;
fail:
// XXX: Free intermediate structure pointers
if (builder_native.buf)
@@ -587,7 +575,6 @@ wasi_nn_load_by_name(wasm_exec_env_t exec_env, char *name, uint32_t name_len,
if (res != success)
goto fail;
-wasi_nn_ctx->is_model_loaded = true;
res = success;
fail:
if (nul_terminated_name != NULL) {
@@ -651,7 +638,6 @@ wasi_nn_load_by_name_with_config(wasm_exec_env_t exec_env, char *name,
if (res != success)
goto fail;
-wasi_nn_ctx->is_model_loaded = true;
res = success;
fail:
if (nul_terminated_name != NULL) {
@@ -684,9 +670,6 @@ wasi_nn_init_execution_context(wasm_exec_env_t exec_env, graph g,
goto fail;
}
-if (success != (res = is_model_initialized(wasi_nn_ctx)))
-goto fail;
if (!wasm_runtime_validate_native_addr(
instance, ctx, (uint64)sizeof(graph_execution_context))) {
NN_ERR_PRINTF("ctx is invalid");
@@ -719,9 +702,6 @@ wasi_nn_set_input(wasm_exec_env_t exec_env, graph_execution_context ctx,
goto fail;
}
-if (success != (res = is_model_initialized(wasi_nn_ctx)))
-goto fail;
tensor input_tensor_native = { 0 };
if (success
!= (res = tensor_app_native(instance, input_tensor,
@@ -756,9 +736,6 @@ wasi_nn_compute(wasm_exec_env_t exec_env, graph_execution_context ctx)
goto fail;
}
-if (success != (res = is_model_initialized(wasi_nn_ctx)))
-goto fail;
call_wasi_nn_func(wasi_nn_ctx->backend, compute, res,
wasi_nn_ctx->backend_ctx, ctx);
fail:
@@ -792,9 +769,6 @@ wasi_nn_get_output(wasm_exec_env_t exec_env, graph_execution_context ctx,
goto fail;
}
-if (success != (res = is_model_initialized(wasi_nn_ctx)))
-goto fail;
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
if (!wasm_runtime_validate_native_addr(instance, output_tensor,
output_tensor_len)) {

View File

@@ -15,7 +15,6 @@ typedef struct {
korp_mutex lock;
bool busy;
bool is_backend_ctx_initialized;
-bool is_model_loaded;
graph_encoding backend;
void *backend_ctx;
} WASINNContext;
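
For reference, the resulting WASINNContext after this commit, reconstructed from the hunk above (only the is_model_loaded member is gone):

typedef struct {
    korp_mutex lock;
    bool busy;
    bool is_backend_ctx_initialized;
    graph_encoding backend;
    void *backend_ctx;
} WASINNContext;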