Commit d6876f1

wasi_nn_llamacpp.c: fix buffer overruns in set_input (#4420)
note: for some reason, WasmEdge seems to ignore the type/dimensions for the ggml input, and some user code relies on that behavior. cf. second-state/WasmEdge-WASINN-examples#196. note: despite the comment in our code, the input does not seem to be NUL-terminated.
1 parent 2372a47 commit d6876f1
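
For context, here is a minimal, self-contained sketch (not the WAMR code; `struct tensor_data` and the names below are illustrative stand-ins for the wasi-nn buffer) of why `strlen()`/`%s` overrun a length-delimited buffer, and the explicit-length pattern this commit switches to:

```c
/* Minimal sketch: reading a prompt from a buffer that carries an
 * explicit byte length and is NOT guaranteed to be NUL-terminated. */
#include <stdio.h>
#include <stdint.h>

struct tensor_data {
    uint8_t *buf;   /* not guaranteed to end with '\0' */
    uint32_t size;  /* byte length of buf */
};

int main(void)
{
    /* Simulate a guest passing a prompt without a trailing '\0'. */
    uint8_t raw[6] = { 'h', 'e', 'l', 'l', 'o', '!' };
    struct tensor_data data = { raw, sizeof(raw) };

    /* Buggy pattern: strlen()/%s keep reading past data.size until
     * they happen to hit a 0 byte -- a buffer overrun:
     *     uint32_t len = strlen((char *)data.buf);
     *     printf("prompt: %s\n", (char *)data.buf);
     */

    /* Fixed pattern: take the length from the buffer's own size field
     * and bound every read with it, as the commit does with %.*s. */
    uint32_t len = data.size;
    printf("prompt: %.*s\n", (int)len, (char *)data.buf);
    return 0;
}
```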

File tree

1 file changed (+3, -3)

core/iwasm/libraries/wasi-nn/src/wasi_nn_llamacpp.c

Lines changed: 3 additions & 3 deletions
```diff
@@ -384,18 +384,18 @@ set_input(void *ctx, graph_execution_context exec_ctx, uint32_t index,
           tensor *wasi_nn_tensor)
 {
     struct LlamaContext *backend_ctx = (struct LlamaContext *)ctx;
-    // tensor->data is the prompt string. ends with \0
+    // tensor->data is the prompt string.
     char *prompt_text = (char *)wasi_nn_tensor->data.buf;
+    uint32_t prompt_text_len = wasi_nn_tensor->data.size;
 
 #ifndef NDEBUG
     NN_DBG_PRINTF("--------------------------------------------------");
-    NN_DBG_PRINTF("prompt_text: %s", prompt_text);
+    NN_DBG_PRINTF("prompt_text: %.*s", (int)prompt_text_len, prompt_text);
     NN_DBG_PRINTF("--------------------------------------------------");
 #endif
 
     // tokenize the prompt
     uint32_t n_token_max = llama_n_ctx(backend_ctx->ctx);
-    uint32_t prompt_text_len = strlen(prompt_text);
 
     if (backend_ctx->prompt == NULL) {
         backend_ctx->prompt = calloc(n_token_max, sizeof(llama_token));
```