Add tensorflow sample under samples/workload/tensorflow (#427)

Wang Ning
2020-10-22 16:18:37 +08:00
committed by GitHub
parent 4787b150b8
commit c515fb1b75
9 changed files with 426 additions and 0 deletions

@@ -0,0 +1,19 @@
"tensorflow" sample introduction
==============
This sample demonstrates how to build [tensorflow](https://github.com/tensorflow/tensorflow) into WebAssembly with the emcc toolchain and run it with iwasm. First, install [emsdk](https://github.com/emscripten-core/emsdk):
```bash
git clone https://github.com/emscripten-core/emsdk.git
cd emsdk
./emsdk install latest
./emsdk activate latest
```
Then set up the emsdk environment:
```bash
source emsdk_env.sh
```
Then run `./build.sh` to build tensorflow and run it with iwasm. The script performs the following steps:
- hack emcc to delete some objects in libc.a
- build tf-lite with the emcc compiler
- build iwasm with pthread enabled and the libc-emcc library included
- run the benchmark model with iwasm (see the example after this list):
  - `--max_secs=300`: caps the benchmark run at 5 minutes; adjust it as needed
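For reference, a minimal sketch of the final run step, mirroring what `build.sh` does; the relative path assumes this sample sits at `samples/workload/tensorflow` inside the WAMR tree, and the heap size and `--max_secs` value are simply the script's defaults:
```bash
# assumes ./build.sh has already produced out/benchmark_model.wasm
# and built iwasm under product-mini/platforms/linux/build
WAMR_ROOT=../../..   # WAMR repository root, relative to this sample (assumed layout)

# run the benchmark model with iwasm; --max_secs caps the benchmark duration in seconds
${WAMR_ROOT}/product-mini/platforms/linux/build/iwasm --heap-size=10475860 \
    out/benchmark_model.wasm \
    --graph=mobilenet_quant_v1_224.tflite --max_secs=300
```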

@@ -0,0 +1,97 @@
#!/bin/bash
####################################
# build tensorflow-lite sample #
####################################
set -x
set -e
EMSDK_WASM_DIR="$EM_CACHE/wasm"
BUILD_SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
OUT_DIR=${BUILD_SCRIPT_DIR}/out
TENSORFLOW_DIR="${BUILD_SCRIPT_DIR}/tensorflow"
TF_LITE_BUILD_DIR=${TENSORFLOW_DIR}/tensorflow/lite/tools/make
WAMR_DIR="${BUILD_SCRIPT_DIR}/../../../product-mini/platforms/linux"
function Clear_Before_Exit
{
[[ -f ${TENSORFLOW_DIR}/tf_lite.patch ]] &&
rm -f ${TENSORFLOW_DIR}/tf_lite.patch
# restore the libc.a under EMSDK_WASM_DIR
cd ${EMSDK_WASM_DIR}
mv libc.a.bak libc.a
}
# 1. hack emcc
cd ${EMSDK_WASM_DIR}
# back up libc.a
cp libc.a libc.a.bak
# delete some objects in libc.a
emar d libc.a open.o
emar d libc.a mmap.o
emar d libc.a munmap.o
emranlib libc.a
# 2. build tf-lite
cd ${BUILD_SCRIPT_DIR}
# 2.1 clone tf repo from Github and checkout to 2303ed commit
if [ ! -d "tensorflow" ]; then
git clone https://github.com/tensorflow/tensorflow.git
fi
cd ${TENSORFLOW_DIR}
git checkout 2303ed4bdb344a1fc4545658d1df6d9ce20331dd
# 2.2 copy the tf-lite.patch to tensorflow_root_dir and apply
cd ${TENSORFLOW_DIR}
cp ${BUILD_SCRIPT_DIR}/tf_lite.patch .
git checkout tensorflow/lite/tools/make/Makefile
git checkout tensorflow/lite/tools/make/targets/linux_makefile.inc
if [[ $(git apply tf_lite.patch 2>&1) =~ "error" ]]; then
echo "git apply patch failed, please check tf-lite related changes..."
Clear_Before_Exit
exit 0
fi
cd ${TF_LITE_BUILD_DIR}
# 2.3 download dependencies
if [ ! -d "${TF_LITE_BUILD_DIR}/downloads" ]; then
source download_dependencies.sh
fi
# 2.4 build tf-lite target
if [ -d "${TF_LITE_BUILD_DIR}/gen" ]; then
rm -fr ${TF_LITE_BUILD_DIR}/gen
fi
make -j 4 -C "${TENSORFLOW_DIR}" -f "${TF_LITE_BUILD_DIR}/Makefile" "$@"
# 2.5 copy /make/gen target files to out/
rm -rf ${OUT_DIR}
mkdir ${OUT_DIR}
cp -r ${TF_LITE_BUILD_DIR}/gen/linux_x86_64/bin/. ${OUT_DIR}/
# 3. build iwasm with pthread and libc_emcc enable
cd ${WAMR_DIR}
rm -fr build && mkdir build
cd build && cmake .. -DWAMR_BUILD_LIB_PTHREAD=1 -DWAMR_BUILD_LIBC_EMCC=1
make
# 4. run tensorflow with iwasm
cd ${BUILD_SCRIPT_DIR}
# 4.1 download tf-lite model
if [ ! -f mobilenet_quant_v1_224.tflite ]; then
wget "https://storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_v1_224_android_quant_2017_11_08.zip"
unzip mobilenet_v1_224_android_quant_2017_11_08.zip
fi
# 4.2 run tf-lite model with iwasm
echo "---> run tensorflow benchmark model with iwasm"
${WAMR_DIR}/build/iwasm --heap-size=10475860 \
${OUT_DIR}/benchmark_model.wasm \
--graph=mobilenet_quant_v1_224.tflite --max_secs=300
Clear_Before_Exit

@@ -0,0 +1,78 @@
diff --git a/tensorflow/lite/tools/make/Makefile b/tensorflow/lite/tools/make/Makefile
index c7ddff5844..1082644043 100644
--- a/tensorflow/lite/tools/make/Makefile
+++ b/tensorflow/lite/tools/make/Makefile
@@ -48,11 +48,7 @@ INCLUDES += -I/usr/local/include
# These are the default libraries needed, but they can be added to or
# overridden by the platform-specific settings in target makefiles.
-LIBS := \
--lstdc++ \
--lpthread \
--lm \
--lz \
+LIBS := -lm \
-ldl
# There are no rules for compiling objects for the host system (since we don't
@@ -84,14 +80,18 @@ endif # ifeq ($(HOST_ARCH),$(TARGET_ARCH))
endif # ifeq ($(HOST_OS),$(TARGET))
endif
+LIBFLAGS += -s TOTAL_STACK=1048576 \
+ -Wl,--export=__data_end -Wl,--export=__heap_base \
+ -s ERROR_ON_UNDEFINED_SYMBOLS=0
+
# This library is the main target for this makefile. It will contain a minimal
# runtime that can be linked in to other programs.
LIB_NAME := libtensorflow-lite.a
# Benchmark static library and binary
BENCHMARK_LIB_NAME := benchmark-lib.a
-BENCHMARK_BINARY_NAME := benchmark_model
-BENCHMARK_PERF_OPTIONS_BINARY_NAME := benchmark_model_performance_options
+BENCHMARK_BINARY_NAME := benchmark_model.wasm
+BENCHMARK_PERF_OPTIONS_BINARY_NAME := benchmark_model_performance_options.wasm
# A small example program that shows how to link against the library.
MINIMAL_SRCS := \
@@ -277,12 +277,16 @@ LIB_PATH := $(LIBDIR)$(LIB_NAME)
BENCHMARK_LIB := $(LIBDIR)$(BENCHMARK_LIB_NAME)
BENCHMARK_BINARY := $(BINDIR)$(BENCHMARK_BINARY_NAME)
BENCHMARK_PERF_OPTIONS_BINARY := $(BINDIR)$(BENCHMARK_PERF_OPTIONS_BINARY_NAME)
-MINIMAL_BINARY := $(BINDIR)minimal
+MINIMAL_BINARY := $(BINDIR)minimal.wasm
LABEL_IMAGE_BINARY := $(BINDIR)label_image
-CXX := $(CC_PREFIX)${TARGET_TOOLCHAIN_PREFIX}g++
-CC := $(CC_PREFIX)${TARGET_TOOLCHAIN_PREFIX}gcc
-AR := $(CC_PREFIX)${TARGET_TOOLCHAIN_PREFIX}ar
+# CXX := $(CC_PREFIX)${TARGET_TOOLCHAIN_PREFIX}g++
+# CC := $(CC_PREFIX)${TARGET_TOOLCHAIN_PREFIX}gcc
+# AR := $(CC_PREFIX)${TARGET_TOOLCHAIN_PREFIX}ar
+
+CXX := em++
+CC := emcc
+AR := emar
MINIMAL_OBJS := $(addprefix $(OBJDIR), \
$(patsubst %.cc,%.o,$(patsubst %.c,%.o,$(MINIMAL_SRCS))))
diff --git a/tensorflow/lite/tools/make/targets/linux_makefile.inc b/tensorflow/lite/tools/make/targets/linux_makefile.inc
index 222cef9e5f..eea89a38f0 100644
--- a/tensorflow/lite/tools/make/targets/linux_makefile.inc
+++ b/tensorflow/lite/tools/make/targets/linux_makefile.inc
@@ -2,12 +2,10 @@
ifeq ($(TARGET), linux)
CXXFLAGS += \
-fPIC \
- -DGEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK \
- -pthread
+ -DGEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK
CFLAGS += \
-fPIC \
- -DGEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK \
- -pthread
+ -DGEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK
# TODO(petewarden): In the future we may want to add architecture-specific
# flags like -msse4.2
LIBS += -ldl