wasi-nn: Improve TPU support (#2447)
1. Allow TPU and GPU support to be enabled at the same time. 2. Add a Dockerfile for running the example with a [Coral USB accelerator](https://coral.ai/products/accelerator/).
@@ -24,7 +24,7 @@ RUN apt-get install -y wget ca-certificates --no-install-recommends \
 RUN cmake \
     -DWAMR_BUILD_WASI_NN=1 \
-    -DWASI_NN_ENABLE_GPU=1 \
+    -DWAMR_BUILD_WASI_NN_ENABLE_GPU=1 \
     ..
 
 RUN make -j "$(grep -c ^processor /proc/cpuinfo)"
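The GPU option now carries the `WAMR_BUILD_` prefix, so builds outside this Dockerfile need the same rename. A minimal sketch of the updated invocation, assuming a build directory under product-mini/platforms/linux as in the Dockerfiles of this commit:

    # Sketch only: the build location is an assumption based on this commit's Dockerfiles.
    cmake -DWAMR_BUILD_WASI_NN=1 -DWAMR_BUILD_WASI_NN_ENABLE_GPU=1 ..
    make -j "$(grep -c ^processor /proc/cpuinfo)"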
core/iwasm/libraries/wasi-nn/test/Dockerfile.tpu (new file, 37 lines)
@@ -0,0 +1,37 @@
+# Copyright (C) 2019 Intel Corporation. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+FROM ubuntu:20.04 AS base
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+# hadolint ignore=DL3008
+RUN apt-get update && apt-get install -y \
+    cmake build-essential git curl gnupg --no-install-recommends && \
+    rm -rf /var/lib/apt/lists/*
+
+# hadolint ignore=DL3008,DL4006
+RUN echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | tee /etc/apt/sources.list.d/coral-edgetpu.list && \
+    curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - && \
+    apt-get update && apt-get install -y libedgetpu1-std --no-install-recommends && \
+    rm -rf /var/lib/apt/lists/*
+
+WORKDIR /home/wamr
+
+COPY . .
+
+WORKDIR /home/wamr/product-mini/platforms/linux/build
+
+RUN cmake \
+    -DWAMR_BUILD_WASI_NN=1 \
+    -DWAMR_BUILD_WASI_NN_ENABLE_EXTERNAL_DELEGATE=1 \
+    -DWAMR_BUILD_WASI_NN_EXTERNAL_DELEGATE_PATH="libedgetpu.so.1.0" \
+    -DWAMR_BUILD_WASI_NN_ENABLE_GPU=1 \
+    ..
+
+RUN make -j "$(grep -c ^processor /proc/cpuinfo)" && \
+    cp /home/wamr/product-mini/platforms/linux/build/iwasm /iwasm
+
+WORKDIR /assets
+
+ENTRYPOINT [ "/iwasm" ]
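This Dockerfile enables the external delegate (Edge TPU) and GPU options together, which is point 1 of the commit message. A minimal usage sketch, not part of the commit: the image tag, the USB passthrough approach, and the module name `test_tensorflow.wasm` are assumptions, and the Coral USB accelerator must be reachable from inside the container for libedgetpu to open it.

    # Hypothetical usage; tag, device mounts, and module name are assumptions.
    docker build -t wasi-nn-tpu -f core/iwasm/libraries/wasi-nn/test/Dockerfile.tpu .
    docker run --rm --privileged -v /dev/bus/usb:/dev/bus/usb \
        -v "$PWD/assets:/assets" wasi-nn-tpu --dir=. test_tensorflow.wasm

Because the image sets ENTRYPOINT to /iwasm with WORKDIR /assets, everything after the image name is passed to iwasm as its arguments.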
@@ -132,8 +132,8 @@ run_inference(execution_target target, float *input, uint32_t *input_size,
         *output_size = MAX_OUTPUT_TENSOR_SIZE - *output_size;
         if (wasm_get_output(ctx, i, &out_tensor[offset], output_size)
             != success) {
-            NN_ERR_PRINTF("Error when getting output.");
-            exit(1);
+            NN_ERR_PRINTF("Error when getting index %d.", i);
+            break;
         }
 
         offset += *output_size;
@@ -11,7 +11,7 @@
 #include "wasi_nn.h"
 
 #define MAX_MODEL_SIZE 85000000
-#define MAX_OUTPUT_TENSOR_SIZE 200
+#define MAX_OUTPUT_TENSOR_SIZE 1000000
 #define INPUT_TENSOR_DIMS 4
 #define EPSILON 1e-8