fix: correct typos and improve comments across multiple files by codespell (#4116)
Signed-off-by: Huang Qi <huangqi3@xiaomi.com>
@@ -991,7 +991,7 @@ __wasi_sock_get_ipv6_only(__wasi_fd_t fd, bool *option)
 /**
  * TODO: modify recv() and send()
  * since don't want to re-compile the wasi-libc,
- * we tend to keep original implentations of recv() and send().
+ * we tend to keep original implementations of recv() and send().
  */

 #ifdef __cplusplus
@@ -65,7 +65,7 @@ test(int iter_num, int max_threads_num, int retry_num, int retry_time_us)
     }

     while ((__atomic_load_n(&threads_in_use, __ATOMIC_SEQ_CST) != 0)) {
-        // Casting to int* to supress compiler warning
+        // Casting to int* to suppress compiler warning
         __builtin_wasm_memory_atomic_wait32((int *)(&threads_in_use), 0,
                                             second_us);
     }
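For context, the hunk above parks the waiting thread with the wasm threads builtins until threads_in_use drains to zero. The sketch below shows the usual wait/notify pairing on such a counter. It is illustrative only, not code from this repository: the threads_in_use name is reused for clarity, the waiter and worker functions are hypothetical, and it assumes clang targeting wasm32 with the atomics feature and a shared linear memory.

#include <stdint.h>

static int threads_in_use = 0; /* shared counter; name reused from the test above */

/* Waiter: block until the counter drops to zero. */
static void
wait_until_idle(void)
{
    int observed;
    while ((observed = __atomic_load_n(&threads_in_use, __ATOMIC_SEQ_CST)) != 0) {
        /* Sleep only while the value is still `observed`; -1 means no timeout. */
        __builtin_wasm_memory_atomic_wait32(&threads_in_use, observed, -1);
    }
}

/* Worker: release its slot and wake all waiters once the counter hits zero. */
static void
release_slot(void)
{
    if (__atomic_sub_fetch(&threads_in_use, 1, __ATOMIC_SEQ_CST) == 0)
        __builtin_wasm_memory_atomic_notify(&threads_in_use, UINT32_MAX);
}

Waiting on the last observed value and re-checking in a loop is the standard guard against the counter changing between the atomic load and the wait.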
@@ -26,7 +26,7 @@ typedef __wasi_dircookie_t wasi_dircookie_t;
 // result are not guaranteed to be zero'ed by us so the result essentially
 // contains garbage from the WASM app perspective. To prevent this, we return
 // uint32 directly instead so as not to be reliant on the correct behaviour of
-// any current/future WASI SDK implemenations.
+// any current/future WASI SDK implementations.
 typedef uint32_t wasi_errno_t;
 typedef __wasi_event_t wasi_event_t;
 typedef __wasi_exitcode_t wasi_exitcode_t;
@@ -6,7 +6,7 @@
  */

 /**
- * The defitions of type, macro and structure in this file should be
+ * The definitions of type, macro and structure in this file should be
  * consistent with those in wasi-libc:
  * https://github.com/WebAssembly/wasi-libc/blob/main/libc-bottom-half/headers/public/wasi/api.h
  */
@@ -14,7 +14,7 @@ extern char const *LLAMA_COMMIT;
 extern char const *LLAMA_COMPILER;
 extern char const *LLAMA_BUILD_TARGET;

-// compatable with WasmEdge
+// compatible with WasmEdge
 // https://github.com/second-state/WasmEdge-WASINN-examples/blob/master/wasmedge-ggml/README.md#parameters
 // https://github.com/WasmEdge/WasmEdge/blob/master/plugins/wasi_nn/ggml.cpp
 struct wasi_nn_llama_config {
@@ -56,7 +56,7 @@ initialize_g(TFLiteContext *tfl_ctx, graph *g)
     os_mutex_lock(&tfl_ctx->g_lock);
     if (tfl_ctx->current_models == MAX_GRAPHS_PER_INST) {
         os_mutex_unlock(&tfl_ctx->g_lock);
-        NN_ERR_PRINTF("Excedded max graphs per WASM instance");
+        NN_ERR_PRINTF("Exceeded max graphs per WASM instance");
         return runtime_error;
     }
     *g = tfl_ctx->current_models++;
@@ -70,7 +70,7 @@ initialize_graph_ctx(TFLiteContext *tfl_ctx, graph g,
     os_mutex_lock(&tfl_ctx->g_lock);
     if (tfl_ctx->current_interpreters == MAX_GRAPH_EXEC_CONTEXTS_PER_INST) {
         os_mutex_unlock(&tfl_ctx->g_lock);
-        NN_ERR_PRINTF("Excedded max graph execution context per WASM instance");
+        NN_ERR_PRINTF("Exceeded max graph execution context per WASM instance");
         return runtime_error;
     }
     *ctx = tfl_ctx->current_interpreters++;
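Both TFLite hunks above follow the same bounded slot-allocation pattern: take the lock, refuse when the fixed capacity is exhausted, otherwise hand out the next index and release the lock. A minimal standalone sketch of that pattern follows; it is not code from this repository, uses pthread in place of the runtime's os_mutex wrappers, and MAX_SLOTS / alloc_slot are hypothetical stand-ins for MAX_GRAPHS_PER_INST and the initializers shown above.

#include <pthread.h>
#include <stdio.h>

#define MAX_SLOTS 4 /* hypothetical stand-in for MAX_GRAPHS_PER_INST */

static pthread_mutex_t slots_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned next_slot = 0;

/* Returns 0 and stores the allocated index in *slot, or -1 when full. */
static int
alloc_slot(unsigned *slot)
{
    int ret = 0;
    pthread_mutex_lock(&slots_lock);
    if (next_slot == MAX_SLOTS) {
        fprintf(stderr, "Exceeded max slots per instance\n");
        ret = -1;
    }
    else {
        *slot = next_slot++;
    }
    pthread_mutex_unlock(&slots_lock);
    return ret;
}

int
main(void)
{
    unsigned g;
    while (alloc_slot(&g) == 0)
        printf("allocated slot %u\n", g);
    return 0;
}

Holding the lock across both the capacity check and the increment is what keeps the check-then-allocate step atomic when several threads race for a slot.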
@@ -325,7 +325,7 @@ set_input(void *tflite_ctx, graph_execution_context ctx, uint32_t index,
         int size = model_tensor_size * sizeof(float);
         bh_memcpy_s(it, size, input_tensor->data, size);
     }
-    else { // TODO: Assumming uint8 quantized networks.
+    else { // TODO: Assuming uint8 quantized networks.
         TfLiteAffineQuantization *quant_info =
             (TfLiteAffineQuantization *)tensor->quantization.params;
         if (quant_info->scale->size != 1 || quant_info->zero_point->size != 1) {
@@ -406,7 +406,7 @@ get_output(void *tflite_ctx, graph_execution_context ctx, uint32_t index,
         int size = model_tensor_size * sizeof(float);
         bh_memcpy_s(output_tensor, size, ot, size);
     }
-    else { // TODO: Assumming uint8 quantized networks.
+    else { // TODO: Assuming uint8 quantized networks.
         TfLiteAffineQuantization *quant_info =
             (TfLiteAffineQuantization *)tensor->quantization.params;
         if (quant_info->scale->size != 1 || quant_info->zero_point->size != 1) {
@@ -286,7 +286,7 @@ def execute_wasmedge_ggml_qwen(iwasm_bin: str, wasmedge_bin: str, cwd: Path):

     p.stdin.write(b"hi\n")
     p.stdin.flush()
-    # ASSITANT
+    # ASSISTANT
     p.stdout.readline()
     # xxx
     p.stdout.readline()
@@ -296,7 +296,7 @@ def execute_wasmedge_ggml_qwen(iwasm_bin: str, wasmedge_bin: str, cwd: Path):
     p.stdin.write(prompt.encode())
     p.stdin.write(b"\n")
     p.stdin.flush()
-    # ASSITANT
+    # ASSISTANT
     p.stdout.readline()
     # xxx
     answer = p.stdout.readline().decode("utf-8")