wasi-nn: Enable GPU support (#1922)

- Split the logic into several Docker images:
  - runtime: wasi-nn-cpu and wasi-nn-Nvidia-gpu.
  - compilation: wasi-nn-compile, which prepares the testing wasm and generates the TFLite models.
- Implement GPU support for TFLite with OpenCL (a minimal host-side sketch follows).
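A minimal, self-contained sketch of what the new GPU path does on the host side. It uses the same public TFLite delegate APIs that appear in the diff below (TfLiteGpuDelegateOptionsV2Default, TfLiteGpuDelegateV2Create, ModifyGraphWithDelegate); the model path, resolver/interpreter boilerplate, and fallback handling are illustrative, not taken from the commit.

// Sketch: attach the TFLite GPU (OpenCL) delegate to an interpreter.
// Everything outside the delegate calls is illustrative boilerplate.
#include <memory>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>
#include <tensorflow/lite/delegates/gpu/delegate.h>

int main() {
    // "model.tflite" is a placeholder path, e.g. one of the models
    // produced by the wasi-nn-compile image.
    auto model = tflite::FlatBufferModel::BuildFromFile("model.tflite");
    if (model == nullptr)
        return 1;

    tflite::ops::builtin::BuiltinOpResolver resolver;
    std::unique_ptr<tflite::Interpreter> interpreter;
    tflite::InterpreterBuilder(*model, resolver)(&interpreter);
    if (interpreter == nullptr)
        return 1;

    // Same options the commit sets: sustained speed, minimum latency.
    TfLiteGpuDelegateOptionsV2 options = TfLiteGpuDelegateOptionsV2Default();
    options.inference_preference =
        TFLITE_GPU_INFERENCE_PREFERENCE_SUSTAINED_SPEED;
    options.inference_priority1 = TFLITE_GPU_INFERENCE_PRIORITY_MIN_LATENCY;

    TfLiteDelegate *delegate = TfLiteGpuDelegateV2Create(&options);
    if (interpreter->ModifyGraphWithDelegate(delegate) != kTfLiteOk) {
        // The commit's behavior on failure: log the error and fall back
        // to the default CPU execution path.
    }

    interpreter->AllocateTensors();
    // ... fill input tensors, interpreter->Invoke(), read output tensors ...

    TfLiteGpuDelegateV2Delete(delegate);
    return 0;
}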
tonibofarull authored on 2023-02-02 01:09:46 +01:00; committed by GitHub
parent fe3347d5d2 · commit 1614ce12fa
10 changed files with 178 additions and 56 deletions


@@ -16,6 +16,7 @@
 #include <tensorflow/lite/model.h>
 #include <tensorflow/lite/optional_debug_tools.h>
 #include <tensorflow/lite/error_reporter.h>
+#include <tensorflow/lite/delegates/gpu/delegate.h>

 /* Global variables */
@@ -45,8 +46,8 @@ tensorflowlite_load(graph_builder_array *builder, graph_encoding encoding,
         return invalid_argument;
     }

-    if (target != cpu) {
-        NN_ERR_PRINTF("Only CPU target is supported.");
+    if (target != cpu && target != gpu) {
+        NN_ERR_PRINTF("Only CPU and GPU target is supported.");
         return invalid_argument;
     }
@@ -79,6 +80,29 @@ tensorflowlite_load(graph_builder_array *builder, graph_encoding encoding,
         return missing_memory;
     }

+    bool use_default = false;
+    switch (target) {
+        case gpu:
+        {
+            // https://www.tensorflow.org/lite/performance/gpu
+            auto options = TfLiteGpuDelegateOptionsV2Default();
+            options.inference_preference =
+                TFLITE_GPU_INFERENCE_PREFERENCE_SUSTAINED_SPEED;
+            options.inference_priority1 =
+                TFLITE_GPU_INFERENCE_PRIORITY_MIN_LATENCY;
+            auto *delegate = TfLiteGpuDelegateV2Create(&options);
+            if (interpreter->ModifyGraphWithDelegate(delegate) != kTfLiteOk) {
+                NN_ERR_PRINTF("Error when enabling GPU delegate.");
+                use_default = true;
+            }
+            break;
+        }
+        default:
+            use_default = true;
+    }
+
+    if (use_default)
+        NN_WARN_PRINTF("Default encoding is CPU.");
+
     return success;
 }
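For completeness, a hypothetical guest-side call that requests the new gpu execution target. It assumes a wasi_nn.h guest header whose load() mirrors the host signature shown above, with graph_builder/graph_builder_array structs holding a buffer pointer plus size and enum values named tensorflowlite, gpu, and success; those names and the model path are assumptions for illustration, not part of this commit.

// Hypothetical guest (wasm) code: load a TFLite model on the GPU target.
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include "wasi_nn.h" // assumed guest-side header declaring the wasi-nn imports

int main() {
    // Read the TFLite flatbuffer (path is illustrative).
    FILE *f = fopen("model.tflite", "rb");
    if (f == NULL)
        return 1;
    fseek(f, 0, SEEK_END);
    long size = ftell(f);
    fseek(f, 0, SEEK_SET);
    uint8_t *buf = (uint8_t *)malloc(size);
    if (buf == NULL || fread(buf, 1, size, f) != (size_t)size) {
        fclose(f);
        return 1;
    }
    fclose(f);

    graph_builder builder = { buf, (uint32_t)size };     // assumed fields: buf, size
    graph_builder_array builder_array = { &builder, 1 }; // assumed fields: buf, size

    graph g;
    // Request the gpu target; per the diff, the host falls back to the
    // default CPU path if the OpenCL delegate cannot be enabled.
    if (load(&builder_array, tensorflowlite, gpu, &g) != success) {
        free(buf);
        return 1;
    }

    // ... init_execution_context / set_input / compute / get_output ...
    free(buf);
    return 0;
}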