From c474ac600c99fc860c630cee3123f9a5ac439613 Mon Sep 17 00:00:00 2001
From: Lianmin Zheng
Date: Mon, 23 Nov 2020 20:39:17 -0800
Subject: [PATCH 1/4] [AutoScheduler] Fix task extraction

---
 python/tvm/relay/backend/compile_engine.py | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/python/tvm/relay/backend/compile_engine.py b/python/tvm/relay/backend/compile_engine.py
index a3108a7f1b41..b51c7cabc075 100644
--- a/python/tvm/relay/backend/compile_engine.py
+++ b/python/tvm/relay/backend/compile_engine.py
@@ -186,6 +186,10 @@ def select_implementation(op, attrs, inputs, out_type, target, use_autotvm=True)
     all_impls = get_valid_implementations(op, attrs, inputs, out_type, target)

     best_plevel_impl = max(all_impls, key=lambda x: x.plevel)
+    # check if auto_scheduler is enabled, and disable autotvm if so
+    if PassContext.current().config.get("relay.backend.use_auto_scheduler", False):
+        use_autotvm = False
+
     # If not use autotvm, always return the implementation with the highest priority
     if not use_autotvm:
         logger.info(
@@ -288,10 +292,7 @@ def lower_call(call, inputs, target):
         env.tracing = False
         reenable_tracing = True

-    # check if auto_scheduler is enabled, and use pevel to select the implementation if so
-    use_auto_scheduler = PassContext.current().config.get("relay.backend.use_auto_scheduler", False)
-
-    if not is_dyn and not use_auto_scheduler:
+    if not is_dyn:
         best_impl, outputs = select_implementation(op, call.attrs, inputs, ret_type, target)
     else:
         # TODO(@icemelon9): Allow tvm to generate multiple kernels for dynamic shapes.

From f1a9616e20f3ebf1498c7ba136db6a07190538e9 Mon Sep 17 00:00:00 2001
From: Lianmin Zheng
Date: Mon, 23 Nov 2020 20:43:56 -0800
Subject: [PATCH 2/4] fix

---
 python/tvm/relay/backend/compile_engine.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/python/tvm/relay/backend/compile_engine.py b/python/tvm/relay/backend/compile_engine.py
index b51c7cabc075..8adfbb6abee9 100644
--- a/python/tvm/relay/backend/compile_engine.py
+++ b/python/tvm/relay/backend/compile_engine.py
@@ -186,7 +186,8 @@ def select_implementation(op, attrs, inputs, out_type, target, use_autotvm=True)
     all_impls = get_valid_implementations(op, attrs, inputs, out_type, target)

     best_plevel_impl = max(all_impls, key=lambda x: x.plevel)
-    # check if auto_scheduler is enabled, and disable autotvm if so
+    # Disable autotvm if auto_scheduler is enabled.
+    # (i.e. always return the implementation with the highest priority for auto-scheduler).
     if PassContext.current().config.get("relay.backend.use_auto_scheduler", False):
         use_autotvm = False


From 520ab1f8b7d7595cfaf1f4aa93f42a6f3fa8bb0e Mon Sep 17 00:00:00 2001
From: Lianmin Zheng
Date: Mon, 23 Nov 2020 20:46:51 -0800
Subject: [PATCH 3/4] fix

---
 python/tvm/relay/op/strategy/cuda.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/tvm/relay/op/strategy/cuda.py b/python/tvm/relay/op/strategy/cuda.py
index ceaf9ddb84b0..f37fc2a96cd5 100644
--- a/python/tvm/relay/op/strategy/cuda.py
+++ b/python/tvm/relay/op/strategy/cuda.py
@@ -107,7 +107,7 @@ def naive_schedule(_, outs, target):
         # For GPU, we at least need thread binding to make a valid schedule.
         # So the naive schedule cannot be compiled.
         raise RuntimeError(
-            "Cannot compile for GPU targets if no tuned schedule is found."
+            "Cannot compile for GPU targets if no tuned schedule is found. "
             "Please see the warning messages above for more information about the failed workloads."
         )
     return tvm.te.create_schedule(outs[-1].op)

From 1b72de050fbf39f17503ce51d5d1a53d8ceaa3eb Mon Sep 17 00:00:00 2001
From: Lianmin Zheng
Date: Mon, 23 Nov 2020 23:47:44 -0800
Subject: [PATCH 4/4] trigger CI

---
 python/tvm/relay/backend/compile_engine.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/tvm/relay/backend/compile_engine.py b/python/tvm/relay/backend/compile_engine.py
index 8adfbb6abee9..32affe73395c 100644
--- a/python/tvm/relay/backend/compile_engine.py
+++ b/python/tvm/relay/backend/compile_engine.py
@@ -187,7 +187,7 @@ def select_implementation(op, attrs, inputs, out_type, target, use_autotvm=True)

     best_plevel_impl = max(all_impls, key=lambda x: x.plevel)
     # Disable autotvm if auto_scheduler is enabled.
-    # (i.e. always return the implementation with the highest priority for auto-scheduler).
+    # (i.e., always return the implementation with the highest priority for auto-scheduler).
     if PassContext.current().config.get("relay.backend.use_auto_scheduler", False):
         use_autotvm = False
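For context, the "relay.backend.use_auto_scheduler" config key checked in these patches is the one users set when compiling with the auto-scheduler. Below is a minimal usage sketch, not part of the patch itself: it assumes a Relay module `mod`, its `params`, and an "llvm" target are already defined. With the flag enabled, select_implementation() now skips AutoTVM lookup and returns the implementation with the highest plevel.

import tvm
from tvm import relay

# `mod` and `params` are assumed to come from an existing Relay model;
# "llvm" is just an example target.
target = tvm.target.Target("llvm")

# Setting this config key triggers the branch added in PATCH 1/4:
# AutoTVM is disabled and the highest-plevel implementation is selected.
with tvm.transform.PassContext(
    opt_level=3, config={"relay.backend.use_auto_scheduler": True}
):
    lib = relay.build(mod, target=target, params=params)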