From e666d3c2ca98047adac1a99c56368bc1e1adde64 Mon Sep 17 00:00:00 2001
From: Lianmin Zheng
Date: Tue, 24 Nov 2020 03:22:13 -0800
Subject: [PATCH] [AutoScheduler] Fix task extraction (#6965)

* [AutoScheduler] Fix task extraction

* fix

* fix

* trigger CI
---
 python/tvm/relay/backend/compile_engine.py | 10 ++++++----
 python/tvm/relay/op/strategy/cuda.py       |  2 +-
 2 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/python/tvm/relay/backend/compile_engine.py b/python/tvm/relay/backend/compile_engine.py
index a3108a7f1b41..32affe73395c 100644
--- a/python/tvm/relay/backend/compile_engine.py
+++ b/python/tvm/relay/backend/compile_engine.py
@@ -186,6 +186,11 @@ def select_implementation(op, attrs, inputs, out_type, target, use_autotvm=True)
     all_impls = get_valid_implementations(op, attrs, inputs, out_type, target)

     best_plevel_impl = max(all_impls, key=lambda x: x.plevel)
+    # Disable autotvm if auto_scheduler is enabled.
+    # (i.e., always return the implementation with the highest priority for auto-scheduler).
+    if PassContext.current().config.get("relay.backend.use_auto_scheduler", False):
+        use_autotvm = False
+
     # If not use autotvm, always return the implementation with the highest priority
     if not use_autotvm:
         logger.info(
@@ -288,10 +293,7 @@ def lower_call(call, inputs, target):
         env.tracing = False
         reenable_tracing = True

-    # check if auto_scheduler is enabled, and use pevel to select the implementation if so
-    use_auto_scheduler = PassContext.current().config.get("relay.backend.use_auto_scheduler", False)
-
-    if not is_dyn and not use_auto_scheduler:
+    if not is_dyn:
         best_impl, outputs = select_implementation(op, call.attrs, inputs, ret_type, target)
     else:
         # TODO(@icemelon9): Allow tvm to generate multiple kernels for dynamic shapes.
diff --git a/python/tvm/relay/op/strategy/cuda.py b/python/tvm/relay/op/strategy/cuda.py
index ceaf9ddb84b0..f37fc2a96cd5 100644
--- a/python/tvm/relay/op/strategy/cuda.py
+++ b/python/tvm/relay/op/strategy/cuda.py
@@ -107,7 +107,7 @@ def naive_schedule(_, outs, target):
         # For GPU, we at least need thread binding to make a valid schedule.
         # So the naive schedule cannot be compiled.
         raise RuntimeError(
-            "Cannot compile for GPU targets if no tuned schedule is found."
+            "Cannot compile for GPU targets if no tuned schedule is found. "
             "Please see the warning messages above for more information about the failed workloads."
         )
     return tvm.te.create_schedule(outs[-1].op)
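
---
Note: the sketch below (not part of the patch) shows a minimal workflow that exercises the fixed code path, assuming the standard auto-scheduler API of this TVM version. The toy network, shapes, target, and the "tuning.json" log file are illustrative placeholders.

import numpy as np
import tvm
from tvm import relay, auto_scheduler

# A toy one-layer network; the shapes are arbitrary placeholders.
data = relay.var("data", shape=(1, 64), dtype="float32")
weight = relay.var("weight", shape=(32, 64), dtype="float32")
func = relay.Function([data, weight], relay.nn.dense(data, weight))
mod = tvm.IRModule.from_expr(func)
params = {"weight": np.random.uniform(size=(32, 64)).astype("float32")}
target = tvm.target.Target("cuda")

# Task extraction compiles the model with
# relay.backend.use_auto_scheduler=True, so after this patch
# select_implementation() disables autotvm internally and returns the
# highest-plevel implementation for every op.
tasks, task_weights = auto_scheduler.extract_tasks(mod["main"], params, target)

# The same flag is set explicitly when compiling with tuned logs. If a
# GPU workload has no tuned schedule, naive_schedule() raises the
# RuntimeError whose message the cuda.py hunk fixes (the missing space
# between the two concatenated string literals).
with auto_scheduler.ApplyHistoryBest("tuning.json"):
    with tvm.transform.PassContext(
        opt_level=3, config={"relay.backend.use_auto_scheduler": True}
    ):
        lib = relay.build(mod, target=target, params=params)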