Refactor WASI-NN to simplify the support for multiple frameworks (#1834)

- Reorganize the library structure
- Use the latest version of `wasi-nn` wit (Oct 25, 2022):
    0f77c48ec1/wasi-nn.wit.md
- Split logic that converts WASM structs to native structs in a separate file
- Simplify addition of new frameworks
This commit is contained in:
tonibofarull
2023-01-25 11:32:40 +01:00
committed by GitHub
parent 965edff4df
commit 9eed6686df
24 changed files with 911 additions and 504 deletions

View File

@@ -0,0 +1 @@
Dockerfile

View File

@@ -8,18 +8,24 @@ ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install -y \
cmake build-essential git wget python3.10 python3-pip
RUN wget -q https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-14/wasi-sdk-14.0-linux.tar.gz && \
tar xf wasi-sdk-*-linux.tar.gz -C /opt && rm -f wasi-sdk-*-linux.tar.gz && \
mv /opt/wasi-sdk-14.0 /opt/wasi-sdk
ARG WASI_SDK_VER=16
RUN wget -c --progress=dot:giga https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-${WASI_SDK_VER}/wasi-sdk-${WASI_SDK_VER}.0-linux.tar.gz -P /opt \
&& tar xf /opt/wasi-sdk-${WASI_SDK_VER}.0-linux.tar.gz -C /opt \
&& ln -fs /opt/wasi-sdk-${WASI_SDK_VER}.0 /opt/wasi-sdk \
&& rm /opt/wasi-sdk-${WASI_SDK_VER}.0-linux.tar.gz
WORKDIR /home/wamr
COPY core/deps/install_tensorflow.sh core/deps/install_tensorflow.sh
RUN ./core/deps/install_tensorflow.sh
COPY core/iwasm/libraries/wasi-nn/test/requirements.txt .
RUN pip3 install -r requirements.txt
COPY core core
COPY build-scripts build-scripts
COPY product-mini product-mini
RUN pip3 install -r core/iwasm/libraries/wasi-nn/test/requirements.txt
WORKDIR /home/wamr/core/iwasm/libraries/wasi-nn/test/build
RUN cmake -DWAMR_BUILD_WASI_NN=1 ..

View File

@@ -28,7 +28,7 @@ typedef struct {
// WASI-NN wrappers
error
wasm_load(char *model_name, graph *graph)
wasm_load(char *model_name, graph *g)
{
FILE *pFile = fopen(model_name, "r");
if (pFile == NULL)
@@ -64,7 +64,7 @@ wasm_load(char *model_name, graph *graph)
arr.buf[0].size = result;
arr.buf[0].buf = buffer;
error res = load(&arr, tensorflow, cpu, graph);
error res = load(&arr, tensorflowlite, cpu, g);
fclose(pFile);
free(buffer);
@@ -73,13 +73,13 @@ wasm_load(char *model_name, graph *graph)
}
error
wasm_init_execution_context(graph graph, graph_execution_context *ctx)
wasm_init_execution_context(graph g, graph_execution_context *ctx)
{
return init_execution_context(graph, ctx);
return init_execution_context(g, ctx);
}
error
wasm_input(graph_execution_context ctx, float *input_tensor, uint32_t *dim)
wasm_set_input(graph_execution_context ctx, float *input_tensor, uint32_t *dim)
{
tensor_dimensions dims;
dims.size = INPUT_TENSOR_DIMS;
@@ -130,7 +130,7 @@ run_inference(float *input, uint32_t *input_size, uint32_t *output_size,
exit(1);
}
if (wasm_input(ctx, input, input_size) != success) {
if (wasm_set_input(ctx, input, input_size) != success) {
fprintf(stderr, "Error when setting input tensor.");
exit(1);
}
@@ -151,7 +151,7 @@ run_inference(float *input, uint32_t *input_size, uint32_t *output_size,
*output_size = MAX_OUTPUT_TENSOR_SIZE - *output_size;
if (wasm_get_output(ctx, i, &out_tensor[offset], output_size)
!= success) {
fprintf(stderr, "Error when getting input .");
fprintf(stderr, "Error when getting output .");
exit(1);
}
@@ -295,7 +295,6 @@ main()
test_mult_dimensions();
printf("################### Testing multiple outputs...\n");
test_mult_outputs();
printf("Tests: passed!\n");
return 0;
}