diff --git a/core/iwasm/aot/aot_runtime.c b/core/iwasm/aot/aot_runtime.c index 9d4d763b5b..5fa5438bd0 100644 --- a/core/iwasm/aot/aot_runtime.c +++ b/core/iwasm/aot/aot_runtime.c @@ -899,24 +899,6 @@ create_exports(AOTModuleInstance *module_inst, AOTModule *module, return create_export_funcs(module_inst, module, error_buf, error_buf_size); } -static bool -clear_wasi_proc_exit_exception(AOTModuleInstance *module_inst) -{ -#if WASM_ENABLE_LIBC_WASI != 0 - const char *exception = aot_get_exception(module_inst); - if (exception && !strcmp(exception, "Exception: wasi proc exit")) { - /* The "wasi proc exit" exception is thrown by native lib to - let wasm app exit, which is a normal behavior, we clear - the exception here. */ - aot_set_exception(module_inst, NULL); - return true; - } - return false; -#else - return false; -#endif -} - static bool execute_post_inst_function(AOTModuleInstance *module_inst) { @@ -956,7 +938,6 @@ execute_start_function(AOTModuleInstance *module_inst) u.f(exec_env); wasm_exec_env_destroy(exec_env); - (void)clear_wasi_proc_exit_exception(module_inst); return !aot_get_exception(module_inst); } @@ -1407,13 +1388,6 @@ aot_call_function(WASMExecEnv *exec_env, AOTFunctionInstance *function, ret = invoke_native_internal(exec_env, function->u.func.func_ptr, func_type, NULL, NULL, argv1, argc, argv); - if (!ret || aot_get_exception(module_inst)) { - if (clear_wasi_proc_exit_exception(module_inst)) - ret = true; - else - ret = false; - } - #if WASM_ENABLE_DUMP_CALL_STACK != 0 if (!ret) { if (aot_create_call_stack(exec_env)) { @@ -1473,9 +1447,6 @@ aot_call_function(WASMExecEnv *exec_env, AOTFunctionInstance *function, ret = invoke_native_internal(exec_env, function->u.func.func_ptr, func_type, NULL, NULL, argv, argc, argv); - if (clear_wasi_proc_exit_exception(module_inst)) - ret = true; - #if WASM_ENABLE_DUMP_CALL_STACK != 0 if (aot_get_exception(module_inst)) { if (aot_create_call_stack(exec_env)) { @@ -1516,7 +1487,7 @@ aot_create_exec_env_and_call_function(AOTModuleInstance *module_inst, } } - ret = aot_call_function(exec_env, func, argc, argv); + ret = wasm_runtime_call_wasm(exec_env, func, argc, argv); /* don't destroy the exec_env if it isn't created in this function */ if (!existing_exec_env) @@ -2006,9 +1977,6 @@ aot_call_indirect(WASMExecEnv *exec_env, uint32 tbl_idx, uint32 table_elem_idx, } fail: - if (clear_wasi_proc_exit_exception(module_inst)) - return true; - #ifdef OS_ENABLE_HW_BOUND_CHECK wasm_runtime_access_exce_check_guard_page(); #endif diff --git a/core/iwasm/common/wasm_exec_env.c b/core/iwasm/common/wasm_exec_env.c index 515a60b8e9..7939c75909 100644 --- a/core/iwasm/common/wasm_exec_env.c +++ b/core/iwasm/common/wasm_exec_env.c @@ -181,6 +181,9 @@ wasm_exec_env_destroy(WASMExecEnv *exec_env) the stopped thread will be overrided by other threads */ wasm_cluster_thread_exited(exec_env); #endif + /* We have terminated all other threads and this is the only live thread, + * so we don't acquire cluster->lock because the cluster will be destroyed + * inside this function */ wasm_cluster_del_exec_env(cluster, exec_env); } #endif /* end of WASM_ENABLE_THREAD_MGR */ diff --git a/core/iwasm/common/wasm_runtime_common.c b/core/iwasm/common/wasm_runtime_common.c index 88dd1c19d4..aed92edf2a 100644 --- a/core/iwasm/common/wasm_runtime_common.c +++ b/core/iwasm/common/wasm_runtime_common.c @@ -1743,6 +1743,30 @@ wasm_runtime_finalize_call_function(WASMExecEnv *exec_env, } #endif +static bool +clear_wasi_proc_exit_exception(WASMModuleInstanceCommon *module_inst_comm) +{
+#if WASM_ENABLE_LIBC_WASI != 0 + const char *exception; + WASMModuleInstance *module_inst = (WASMModuleInstance *)module_inst_comm; + + bh_assert(module_inst_comm->module_type == Wasm_Module_Bytecode + || module_inst_comm->module_type == Wasm_Module_AoT); + + exception = wasm_get_exception(module_inst); + if (exception && !strcmp(exception, "Exception: wasi proc exit")) { + /* The "wasi proc exit" exception is thrown by native lib to + let wasm app exit, which is a normal behavior, we clear + the exception here. */ + wasm_set_exception(module_inst, NULL); + return true; + } + return false; +#else + return false; +#endif +} + bool wasm_runtime_call_wasm(WASMExecEnv *exec_env, WASMFunctionInstanceCommon *function, uint32 argc, @@ -1783,10 +1807,15 @@ wasm_runtime_call_wasm(WASMExecEnv *exec_env, param_argc, new_argv); #endif if (!ret) { - if (new_argv != argv) { - wasm_runtime_free(new_argv); + if (clear_wasi_proc_exit_exception(exec_env->module_inst)) { + ret = true; + } + else { + if (new_argv != argv) { + wasm_runtime_free(new_argv); + } + return false; } - return false; } #if WASM_ENABLE_REF_TYPES != 0 @@ -2150,11 +2179,25 @@ wasm_runtime_get_exec_env_singleton(WASMModuleInstanceCommon *module_inst_comm) void wasm_set_exception(WASMModuleInstance *module_inst, const char *exception) { - if (exception) + WASMExecEnv *exec_env = NULL; + + if (exception) { snprintf(module_inst->cur_exception, sizeof(module_inst->cur_exception), "Exception: %s", exception); - else + } + else { module_inst->cur_exception[0] = '\0'; + } + +#if WASM_ENABLE_THREAD_MGR != 0 + exec_env = + wasm_clusters_search_exec_env((WASMModuleInstanceCommon *)module_inst); + if (exec_env) { + wasm_cluster_spread_exception(exec_env, exception ? false : true); + } +#else + (void)exec_env; +#endif } /* clang-format off */ @@ -4179,6 +4222,8 @@ bool wasm_runtime_call_indirect(WASMExecEnv *exec_env, uint32 element_indices, uint32 argc, uint32 argv[]) { + bool ret = false; + if (!wasm_runtime_exec_env_check(exec_env)) { LOG_ERROR("Invalid exec env stack info."); return false; @@ -4190,13 +4235,18 @@ wasm_runtime_call_indirect(WASMExecEnv *exec_env, uint32 element_indices, #if WASM_ENABLE_INTERP != 0 if (exec_env->module_inst->module_type == Wasm_Module_Bytecode) - return wasm_call_indirect(exec_env, 0, element_indices, argc, argv); + ret = wasm_call_indirect(exec_env, 0, element_indices, argc, argv); #endif #if WASM_ENABLE_AOT != 0 if (exec_env->module_inst->module_type == Wasm_Module_AoT) - return aot_call_indirect(exec_env, 0, element_indices, argc, argv); + ret = aot_call_indirect(exec_env, 0, element_indices, argc, argv); #endif - return false; + + if (!ret && clear_wasi_proc_exit_exception(exec_env->module_inst)) { + ret = true; + } + + return ret; } static void diff --git a/core/iwasm/interpreter/wasm_interp_classic.c b/core/iwasm/interpreter/wasm_interp_classic.c index 46c5ff444f..e49b885ce7 100644 --- a/core/iwasm/interpreter/wasm_interp_classic.c +++ b/core/iwasm/interpreter/wasm_interp_classic.c @@ -4018,24 +4018,6 @@ fast_jit_call_func_bytecode(WASMModuleInstance *module_inst, #endif /* end of WASM_ENABLE_FAST_JIT != 0 */ #if WASM_ENABLE_JIT != 0 -static bool -clear_wasi_proc_exit_exception(WASMModuleInstance *module_inst) -{ -#if WASM_ENABLE_LIBC_WASI != 0 - const char *exception = wasm_get_exception(module_inst); - if (exception && !strcmp(exception, "Exception: wasi proc exit")) { - /* The "wasi proc exit" exception is thrown by native lib to - let wasm app exit, which is a normal behavior, we clear - the exception 
here. */ - wasm_set_exception(module_inst, NULL); - return true; - } - return false; -#else - return false; -#endif -} - static bool llvm_jit_call_func_bytecode(WASMModuleInstance *module_inst, WASMExecEnv *exec_env, @@ -4095,14 +4077,6 @@ llvm_jit_call_func_bytecode(WASMModuleInstance *module_inst, ret = wasm_runtime_invoke_native( exec_env, module_inst->func_ptrs[func_idx], func_type, NULL, NULL, argv1, argc, argv); - - if (!ret || wasm_get_exception(module_inst)) { - if (clear_wasi_proc_exit_exception(module_inst)) - ret = true; - else - ret = false; - } - if (!ret) { if (argv1 != argv1_buf) wasm_runtime_free(argv1); @@ -4147,9 +4121,6 @@ llvm_jit_call_func_bytecode(WASMModuleInstance *module_inst, exec_env, module_inst->func_ptrs[func_idx], func_type, NULL, NULL, argv, argc, argv); - if (clear_wasi_proc_exit_exception(module_inst)) - ret = true; - return ret && !wasm_get_exception(module_inst) ? true : false; } } diff --git a/core/iwasm/interpreter/wasm_runtime.c b/core/iwasm/interpreter/wasm_runtime.c index cf363af4a1..27b3c985b7 100644 --- a/core/iwasm/interpreter/wasm_runtime.c +++ b/core/iwasm/interpreter/wasm_runtime.c @@ -2033,24 +2033,6 @@ wasm_lookup_table(const WASMModuleInstance *module_inst, const char *name) } #endif -static bool -clear_wasi_proc_exit_exception(WASMModuleInstance *module_inst) -{ -#if WASM_ENABLE_LIBC_WASI != 0 - const char *exception = wasm_get_exception(module_inst); - if (exception && !strcmp(exception, "Exception: wasi proc exit")) { - /* The "wasi proc exit" exception is thrown by native lib to - let wasm app exit, which is a normal behavior, we clear - the exception here. */ - wasm_set_exception(module_inst, NULL); - return true; - } - return false; -#else - return false; -#endif -} - #ifdef OS_ENABLE_HW_BOUND_CHECK static void @@ -2160,7 +2142,6 @@ wasm_call_function(WASMExecEnv *exec_env, WASMFunctionInstance *function, wasm_exec_env_set_thread_info(exec_env); interp_call_wasm(module_inst, exec_env, function, argc, argv); - (void)clear_wasi_proc_exit_exception(module_inst); return !wasm_get_exception(module_inst) ? true : false; } @@ -2188,7 +2169,7 @@ wasm_create_exec_env_and_call_function(WASMModuleInstance *module_inst, } } - ret = wasm_call_function(exec_env, func, argc, argv); + ret = wasm_runtime_call_wasm(exec_env, func, argc, argv); /* don't destroy the exec_env if it isn't created in this function */ if (!existing_exec_env) @@ -2458,7 +2439,6 @@ call_indirect(WASMExecEnv *exec_env, uint32 tbl_idx, uint32 elem_idx, interp_call_wasm(module_inst, exec_env, func_inst, argc, argv); - (void)clear_wasi_proc_exit_exception(module_inst); return !wasm_get_exception(module_inst) ? 
true : false; got_exception: diff --git a/core/iwasm/libraries/lib-pthread/lib_pthread_wrapper.c b/core/iwasm/libraries/lib-pthread/lib_pthread_wrapper.c index 2fb7033f36..3bf3269b80 100644 --- a/core/iwasm/libraries/lib-pthread/lib_pthread_wrapper.c +++ b/core/iwasm/libraries/lib-pthread/lib_pthread_wrapper.c @@ -520,8 +520,7 @@ pthread_start_routine(void *arg) if (!wasm_runtime_call_indirect(exec_env, routine_args->elem_index, 1, argv)) { - if (wasm_runtime_get_exception(module_inst)) - wasm_cluster_spread_exception(exec_env); + /* Exception has already been spread during throwing */ } /* destroy pthread key values */ diff --git a/core/iwasm/libraries/thread-mgr/thread_manager.c b/core/iwasm/libraries/thread-mgr/thread_manager.c index 4408b01331..a472a2ad3f 100644 --- a/core/iwasm/libraries/thread-mgr/thread_manager.c +++ b/core/iwasm/libraries/thread-mgr/thread_manager.c @@ -76,6 +76,7 @@ traverse_list(bh_list *l, list_visitor visitor, void *user_data) } } +/* The caller must lock cluster->lock */ static bool allocate_aux_stack(WASMCluster *cluster, uint32 *start, uint32 *size) { @@ -86,7 +87,6 @@ allocate_aux_stack(WASMCluster *cluster, uint32 *start, uint32 *size) if (!cluster->stack_segment_occupied) return false; - os_mutex_lock(&cluster->lock); for (i = 0; i < cluster_max_thread_num; i++) { if (!cluster->stack_segment_occupied[i]) { if (start) @@ -94,14 +94,14 @@ allocate_aux_stack(WASMCluster *cluster, uint32 *start, uint32 *size) if (size) *size = cluster->stack_size; cluster->stack_segment_occupied[i] = true; - os_mutex_unlock(&cluster->lock); return true; } } - os_mutex_unlock(&cluster->lock); + return false; } +/* The caller must lock cluster->lock */ static bool free_aux_stack(WASMCluster *cluster, uint32 start) { @@ -109,9 +109,7 @@ free_aux_stack(WASMCluster *cluster, uint32 start) for (i = 0; i < cluster_max_thread_num; i++) { if (start == cluster->stack_tops[i]) { - os_mutex_lock(&cluster->lock); cluster->stack_segment_occupied[i] = false; - os_mutex_unlock(&cluster->lock); return true; } } @@ -265,20 +263,21 @@ wasm_exec_env_get_cluster(WASMExecEnv *exec_env) return exec_env->cluster; } -bool +/* The caller must lock cluster->lock */ +static bool wasm_cluster_add_exec_env(WASMCluster *cluster, WASMExecEnv *exec_env) { bool ret = true; exec_env->cluster = cluster; - os_mutex_lock(&cluster->lock); if (bh_list_insert(&cluster->exec_env_list, exec_env) != 0) ret = false; - os_mutex_unlock(&cluster->lock); + return ret; } +/* The caller should lock cluster->lock for thread safety */ bool wasm_cluster_del_exec_env(WASMCluster *cluster, WASMExecEnv *exec_env) { @@ -301,10 +300,8 @@ wasm_cluster_del_exec_env(WASMCluster *cluster, WASMExecEnv *exec_env) } #endif - os_mutex_lock(&cluster->lock); if (bh_list_remove(&cluster->exec_env_list, exec_env) != 0) ret = false; - os_mutex_unlock(&cluster->lock); if (cluster->exec_env_list.len == 0) { /* exec_env_list empty, destroy the cluster */ @@ -374,6 +371,12 @@ wasm_cluster_spawn_exec_env(WASMExecEnv *exec_env) return NULL; } + os_mutex_lock(&cluster->lock); + + if (cluster->has_exception || cluster->processing) { + goto fail1; + } + #if WASM_ENABLE_INTERP != 0 if (module_inst->module_type == Wasm_Module_Bytecode) { stack_size = @@ -390,7 +393,7 @@ wasm_cluster_spawn_exec_env(WASMExecEnv *exec_env) if (!(new_module_inst = wasm_runtime_instantiate_internal( module, true, stack_size, 0, NULL, 0))) { - return NULL; + goto fail1; } /* Set custom_data to new module instance */ @@ -405,32 +408,36 @@ wasm_cluster_spawn_exec_env(WASMExecEnv 
*exec_env) new_exec_env = wasm_exec_env_create_internal(new_module_inst, exec_env->wasm_stack_size); if (!new_exec_env) - goto fail1; + goto fail2; if (!allocate_aux_stack(cluster, &aux_stack_start, &aux_stack_size)) { LOG_ERROR("thread manager error: " "failed to allocate aux stack space for new thread"); - goto fail2; + goto fail3; } /* Set aux stack for current thread */ if (!wasm_exec_env_set_aux_stack(new_exec_env, aux_stack_start, aux_stack_size)) { - goto fail3; + goto fail4; } if (!wasm_cluster_add_exec_env(cluster, new_exec_env)) - goto fail3; + goto fail4; + + os_mutex_unlock(&cluster->lock); return new_exec_env; -fail3: +fail4: /* free the allocated aux stack space */ free_aux_stack(cluster, aux_stack_start); -fail2: +fail3: wasm_exec_env_destroy(new_exec_env); -fail1: +fail2: wasm_runtime_deinstantiate_internal(new_module_inst, true); +fail1: + os_mutex_unlock(&cluster->lock); return NULL; } @@ -443,8 +450,10 @@ wasm_cluster_destroy_spawned_exec_env(WASMExecEnv *exec_env) bh_assert(cluster != NULL); /* Free aux stack space */ + os_mutex_lock(&cluster->lock); free_aux_stack(cluster, exec_env->aux_stack_bottom.bottom); wasm_cluster_del_exec_env(cluster, exec_env); + os_mutex_unlock(&cluster->lock); wasm_exec_env_destroy_internal(exec_env); wasm_runtime_deinstantiate_internal(module_inst, true); @@ -468,15 +477,18 @@ thread_manager_start_routine(void *arg) #endif /* Routine exit */ - /* Free aux stack space */ - free_aux_stack(cluster, exec_env->aux_stack_bottom.bottom); /* Detach the native thread here to ensure the resources are freed */ wasm_cluster_detach_thread(exec_env); #if WASM_ENABLE_DEBUG_INTERP != 0 wasm_cluster_thread_exited(exec_env); #endif - /* Remove and destroy exec_env */ + os_mutex_lock(&cluster->lock); + /* Free aux stack space */ + free_aux_stack(cluster, exec_env->aux_stack_bottom.bottom); + /* Remove exec_env */ wasm_cluster_del_exec_env(cluster, exec_env); + os_mutex_unlock(&cluster->lock); + /* destroy exec_env */ wasm_exec_env_destroy_internal(exec_env); os_thread_exit(ret); @@ -496,25 +508,31 @@ wasm_cluster_create_thread(WASMExecEnv *exec_env, cluster = wasm_exec_env_get_cluster(exec_env); bh_assert(cluster); + os_mutex_lock(&cluster->lock); + + if (cluster->has_exception || cluster->processing) { + goto fail1; + } + new_exec_env = wasm_exec_env_create_internal(module_inst, exec_env->wasm_stack_size); if (!new_exec_env) - return -1; + goto fail1; if (!allocate_aux_stack(cluster, &aux_stack_start, &aux_stack_size)) { LOG_ERROR("thread manager error: " "failed to allocate aux stack space for new thread"); - goto fail1; + goto fail2; } /* Set aux stack for current thread */ if (!wasm_exec_env_set_aux_stack(new_exec_env, aux_stack_start, aux_stack_size)) { - goto fail2; + goto fail3; } if (!wasm_cluster_add_exec_env(cluster, new_exec_env)) - goto fail2; + goto fail3; new_exec_env->thread_start_routine = thread_routine; new_exec_env->thread_arg = arg; @@ -523,18 +541,23 @@ wasm_cluster_create_thread(WASMExecEnv *exec_env, != os_thread_create(&tid, thread_manager_start_routine, (void *)new_exec_env, APP_THREAD_STACK_SIZE_DEFAULT)) { - goto fail3; + goto fail4; } + os_mutex_unlock(&cluster->lock); + return 0; -fail3: +fail4: wasm_cluster_del_exec_env(cluster, new_exec_env); -fail2: +fail3: /* free the allocated aux stack space */ free_aux_stack(cluster, aux_stack_start); -fail1: +fail2: wasm_exec_env_destroy(new_exec_env); +fail1: + os_mutex_unlock(&cluster->lock); + return -1; } @@ -761,17 +784,30 @@ wasm_cluster_exit_thread(WASMExecEnv *exec_env, void
*retval) wasm_cluster_thread_exited(exec_env); #endif /* App exit the thread, free the resources before exit native thread */ - /* Free aux stack space */ - free_aux_stack(cluster, exec_env->aux_stack_bottom.bottom); /* Detach the native thread here to ensure the resources are freed */ wasm_cluster_detach_thread(exec_env); + os_mutex_lock(&cluster->lock); + /* Free aux stack space */ + free_aux_stack(cluster, exec_env->aux_stack_bottom.bottom); /* Remove and destroy exec_env */ wasm_cluster_del_exec_env(cluster, exec_env); + os_mutex_unlock(&cluster->lock); wasm_exec_env_destroy_internal(exec_env); os_thread_exit(retval); } +static void +set_thread_cancel_flags(WASMExecEnv *exec_env) +{ + /* Set the termination flag */ +#if WASM_ENABLE_DEBUG_INTERP != 0 + wasm_cluster_thread_send_signal(exec_env, WAMR_SIG_TERM); +#else + exec_env->suspend_flags.flags |= 0x01; +#endif +} + int32 wasm_cluster_cancel_thread(WASMExecEnv *exec_env) { @@ -783,12 +819,8 @@ wasm_cluster_cancel_thread(WASMExecEnv *exec_env) } os_mutex_unlock(&cluster_list_lock); - /* Set the termination flag */ -#if WASM_ENABLE_DEBUG_INTERP != 0 - wasm_cluster_thread_send_signal(exec_env, WAMR_SIG_TERM); -#else - exec_env->suspend_flags.flags |= 0x01; -#endif + set_thread_cancel_flags(exec_env); + return 0; } @@ -808,15 +840,31 @@ terminate_thread_visitor(void *node, void *user_data) void wasm_cluster_terminate_all(WASMCluster *cluster) { + os_mutex_lock(&cluster->lock); + cluster->processing = true; + os_mutex_unlock(&cluster->lock); + traverse_list(&cluster->exec_env_list, terminate_thread_visitor, NULL); + + os_mutex_lock(&cluster->lock); + cluster->processing = false; + os_mutex_unlock(&cluster->lock); } void wasm_cluster_terminate_all_except_self(WASMCluster *cluster, WASMExecEnv *exec_env) { + os_mutex_lock(&cluster->lock); + cluster->processing = true; + os_mutex_unlock(&cluster->lock); + traverse_list(&cluster->exec_env_list, terminate_thread_visitor, (void *)exec_env); + + os_mutex_lock(&cluster->lock); + cluster->processing = false; + os_mutex_unlock(&cluster->lock); } static void @@ -834,15 +882,31 @@ wait_for_thread_visitor(void *node, void *user_data) void wams_cluster_wait_for_all(WASMCluster *cluster) { + os_mutex_lock(&cluster->lock); + cluster->processing = true; + os_mutex_unlock(&cluster->lock); + traverse_list(&cluster->exec_env_list, wait_for_thread_visitor, NULL); + + os_mutex_lock(&cluster->lock); + cluster->processing = false; + os_mutex_unlock(&cluster->lock); } void wasm_cluster_wait_for_all_except_self(WASMCluster *cluster, WASMExecEnv *exec_env) { + os_mutex_lock(&cluster->lock); + cluster->processing = true; + os_mutex_unlock(&cluster->lock); + traverse_list(&cluster->exec_env_list, wait_for_thread_visitor, (void *)exec_env); + + os_mutex_lock(&cluster->lock); + cluster->processing = false; + os_mutex_unlock(&cluster->lock); } bool @@ -881,15 +945,19 @@ suspend_thread_visitor(void *node, void *user_data) void wasm_cluster_suspend_all(WASMCluster *cluster) { + os_mutex_lock(&cluster->lock); traverse_list(&cluster->exec_env_list, suspend_thread_visitor, NULL); + os_mutex_unlock(&cluster->lock); } void wasm_cluster_suspend_all_except_self(WASMCluster *cluster, WASMExecEnv *exec_env) { + os_mutex_lock(&cluster->lock); traverse_list(&cluster->exec_env_list, suspend_thread_visitor, (void *)exec_env); + os_mutex_unlock(&cluster->lock); } void @@ -910,7 +978,9 @@ resume_thread_visitor(void *node, void *user_data) void wasm_cluster_resume_all(WASMCluster *cluster) { + os_mutex_lock(&cluster->lock); 
traverse_list(&cluster->exec_env_list, resume_thread_visitor, NULL); + os_mutex_unlock(&cluster->lock); } static void @@ -919,24 +989,46 @@ set_exception_visitor(void *node, void *user_data) WASMExecEnv *curr_exec_env = (WASMExecEnv *)node; WASMExecEnv *exec_env = (WASMExecEnv *)user_data; WASMModuleInstanceCommon *module_inst = get_module_inst(exec_env); - WASMModuleInstanceCommon *curr_module_inst = get_module_inst(curr_exec_env); - const char *exception = wasm_runtime_get_exception(module_inst); - /* skip "Exception: " */ - exception += 11; + WASMModuleInstance *wasm_inst = (WASMModuleInstance *)module_inst; + + if (curr_exec_env != exec_env) { + WASMModuleInstance *curr_wasm_inst = + (WASMModuleInstance *)get_module_inst(curr_exec_env); + + bh_memcpy_s(curr_wasm_inst->cur_exception, + sizeof(curr_wasm_inst->cur_exception), + wasm_inst->cur_exception, sizeof(wasm_inst->cur_exception)); + /* Terminate the thread so it can exit from infinite loops */ + set_thread_cancel_flags(curr_exec_env); + } +} + +static void +clear_exception_visitor(void *node, void *user_data) +{ + WASMExecEnv *exec_env = (WASMExecEnv *)user_data; + WASMExecEnv *curr_exec_env = (WASMExecEnv *)node; if (curr_exec_env != exec_env) { - curr_module_inst = get_module_inst(curr_exec_env); - wasm_runtime_set_exception(curr_module_inst, exception); + WASMModuleInstance *curr_wasm_inst = + (WASMModuleInstance *)get_module_inst(curr_exec_env); + + curr_wasm_inst->cur_exception[0] = '\0'; } } void -wasm_cluster_spread_exception(WASMExecEnv *exec_env) +wasm_cluster_spread_exception(WASMExecEnv *exec_env, bool clear) { WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env); bh_assert(cluster); - traverse_list(&cluster->exec_env_list, set_exception_visitor, exec_env); + os_mutex_lock(&cluster->lock); + cluster->has_exception = !clear; + traverse_list(&cluster->exec_env_list, + clear ? clear_exception_visitor : set_exception_visitor, + exec_env); + os_mutex_unlock(&cluster->lock); } static void @@ -964,7 +1056,9 @@ wasm_cluster_spread_custom_data(WASMModuleInstanceCommon *module_inst, cluster = wasm_exec_env_get_cluster(exec_env); bh_assert(cluster); + os_mutex_lock(&cluster->lock); traverse_list(&cluster->exec_env_list, set_custom_data_visitor, custom_data); + os_mutex_unlock(&cluster->lock); } } diff --git a/core/iwasm/libraries/thread-mgr/thread_manager.h b/core/iwasm/libraries/thread-mgr/thread_manager.h index c84d3c0219..657a4abe20 100644 --- a/core/iwasm/libraries/thread-mgr/thread_manager.h +++ b/core/iwasm/libraries/thread-mgr/thread_manager.h @@ -34,6 +34,18 @@ struct WASMCluster { uint32 stack_size; /* Record which segments are occupied */ bool *stack_segment_occupied; + /* When has_exception == true, this cluster should refuse any spawn thread + * requests; this flag can be cleared by calling + * wasm_runtime_clear_exception on the instance of any thread in this cluster + */ + bool has_exception; + /* When processing is true, this cluster should refuse any spawn thread + * requests. This is a short-lived state and must be cleared immediately once + * the processing finishes.
+ * This is used to avoid deadlock when one thread waits for another thread + * while holding the lock; see wams_cluster_wait_for_all and wasm_cluster_terminate_all + */ + bool processing; #if WASM_ENABLE_DEBUG_INTERP != 0 WASMDebugInstance *debug_inst; #endif @@ -113,9 +125,6 @@ void wasm_cluster_wait_for_all_except_self(WASMCluster *cluster, WASMExecEnv *exec_env); -bool -wasm_cluster_add_exec_env(WASMCluster *cluster, WASMExecEnv *exec_env); - bool wasm_cluster_del_exec_env(WASMCluster *cluster, WASMExecEnv *exec_env); @@ -123,7 +132,7 @@ WASMExecEnv * wasm_clusters_search_exec_env(WASMModuleInstanceCommon *module_inst); void -wasm_cluster_spread_exception(WASMExecEnv *exec_env); +wasm_cluster_spread_exception(WASMExecEnv *exec_env, bool clear); WASMExecEnv * wasm_cluster_spawn_exec_env(WASMExecEnv *exec_env); diff --git a/samples/multi-thread/wasm-apps/CMakeLists.txt b/samples/multi-thread/wasm-apps/CMakeLists.txt index 44ced1cc83..ec8f7eefbb 100644 --- a/samples/multi-thread/wasm-apps/CMakeLists.txt +++ b/samples/multi-thread/wasm-apps/CMakeLists.txt @@ -38,3 +38,6 @@ set (CMAKE_EXE_LINKER_FLAGS add_executable(test.wasm main.c) target_link_libraries(test.wasm) + +add_executable(main_thread_exception.wasm main_thread_exception.c) +target_link_libraries(main_thread_exception.wasm) diff --git a/samples/multi-thread/wasm-apps/main_thread_exception.c b/samples/multi-thread/wasm-apps/main_thread_exception.c new file mode 100644 index 0000000000..80f170d40d --- /dev/null +++ b/samples/multi-thread/wasm-apps/main_thread_exception.c @@ -0,0 +1,36 @@ +/* + * Copyright (C) 2019 Intel Corporation. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#include <stdio.h> +#include <pthread.h> + +typedef struct ThreadArgs { + int start; + int length; +} ThreadArgs; + +void * +thread(void *args) +{ + while (1) { + /* When another thread (including the main thread) throws an exception, + this thread can successfully exit the infinite loop */ + } +} + +int +main() +{ + pthread_t tids; + + if (pthread_create(&tids, NULL, thread, NULL) != 0) { + printf("pthread_create failed\n"); + } + + /* Trigger an exception */ + __builtin_trap(); + + return 0; +}
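The new sample exercises the reworked exception path: when the main thread hits __builtin_trap(), wasm_set_exception now calls wasm_cluster_spread_exception, which copies the exception into every other exec_env's instance in the cluster and sets their termination flags, so the spinning child thread leaves its loop instead of hanging. Below is a rough host-side sketch of how an embedder could run the sample and observe the spread exception; this harness is not part of this patch, it assumes a runtime built with lib-pthread and the thread manager enabled, and the file path, sizes and omitted error handling are illustrative only.

#include <stdio.h>
#include "wasm_export.h"
#include "bh_read_file.h"

int
main(void)
{
    char error_buf[128];
    uint32_t size, stack_size = 16 * 1024, heap_size = 16 * 1024;
    RuntimeInitArgs init_args = { 0 };

    init_args.mem_alloc_type = Alloc_With_System_Allocator;
    init_args.max_thread_num = 4; /* needed so a cluster/thread manager exists */
    wasm_runtime_full_init(&init_args);

    /* hypothetical path to the wasm app built from main_thread_exception.c */
    uint8_t *buf =
        (uint8_t *)bh_read_file_to_buffer("main_thread_exception.wasm", &size);
    wasm_module_t module =
        wasm_runtime_load(buf, size, error_buf, sizeof(error_buf));
    wasm_module_inst_t inst = wasm_runtime_instantiate(
        module, stack_size, heap_size, error_buf, sizeof(error_buf));

    /* Runs main(): the unreachable trap raised on the main thread is spread
       to the child thread, which can then exit its busy loop. */
    wasm_application_execute_main(inst, 0, NULL);
    printf("exception: %s\n", wasm_runtime_get_exception(inst));

    wasm_runtime_deinstantiate(inst);
    wasm_runtime_unload(module);
    wasm_runtime_destroy();
    return 0;
}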
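Separately, since clear_wasi_proc_exit_exception now lives in wasm_runtime_common.c and is applied inside wasm_runtime_call_wasm (with the AOT and interpreter entry points routed through that function), a guest that calls WASI proc_exit is reported to the embedder as a successful call rather than as an exception. A minimal sketch of the embedder-visible behavior, assuming an already-instantiated module whose _start export ends up calling proc_exit; setup, cleanup and error handling are omitted:

/* Sketch only: `module_inst` is assumed to be an instantiated WASI module
   whose "_start" export calls the proc_exit import. */
wasm_exec_env_t exec_env =
    wasm_runtime_create_exec_env(module_inst, 64 * 1024);
wasm_function_inst_t start_func =
    wasm_runtime_lookup_function(module_inst, "_start", NULL);

if (wasm_runtime_call_wasm(exec_env, start_func, 0, NULL)) {
    /* The "wasi proc exit" pseudo-exception was cleared inside
       wasm_runtime_call_wasm, so no exception is left on the instance
       and the exit code can be read directly. */
    printf("exit code: %u\n", wasm_runtime_get_wasi_exit_code(module_inst));
}
else {
    printf("call failed: %s\n", wasm_runtime_get_exception(module_inst));
}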