[AutoScheduler] Fix task extraction (apache#6965)
* [AutoScheduler] Fix task extraction

* fix

* fix

* trigger CI
merrymercy authored and trevor-m committed Dec 4, 2020
1 parent 658305d commit 018a8fc
Showing 2 changed files with 7 additions and 5 deletions.
10 changes: 6 additions & 4 deletions python/tvm/relay/backend/compile_engine.py
@@ -186,6 +186,11 @@ def select_implementation(op, attrs, inputs, out_type, target, use_autotvm=True)
     all_impls = get_valid_implementations(op, attrs, inputs, out_type, target)
     best_plevel_impl = max(all_impls, key=lambda x: x.plevel)
 
+    # Disable autotvm if auto_scheduler is enabled.
+    # (i.e., always return the implementation with the highest priority for auto-scheduler).
+    if PassContext.current().config.get("relay.backend.use_auto_scheduler", False):
+        use_autotvm = False
+
     # If not use autotvm, always return the implementation with the highest priority
     if not use_autotvm:
         logger.info(
@@ -288,10 +293,7 @@ def lower_call(call, inputs, target):
         env.tracing = False
         reenable_tracing = True
 
-    # check if auto_scheduler is enabled, and use pevel to select the implementation if so
-    use_auto_scheduler = PassContext.current().config.get("relay.backend.use_auto_scheduler", False)
-
-    if not is_dyn and not use_auto_scheduler:
+    if not is_dyn:
         best_impl, outputs = select_implementation(op, call.attrs, inputs, ret_type, target)
     else:
         # TODO(@icemelon9): Allow tvm to generate multiple kernels for dynamic shapes.
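
For context, "relay.backend.use_auto_scheduler" is a PassContext configuration option; the change above makes select_implementation fall back to the highest-plevel implementation whenever that option is set. Below is a minimal sketch of how the flag is typically enabled when building a Relay module; the resnet-18 workload is only a placeholder model, not part of this commit.

import tvm
from tvm import relay
from tvm.relay import testing  # provides example workloads; used here only for illustration

# Placeholder model; substitute your own Relay module and parameters.
mod, params = testing.resnet.get_workload(num_layers=18)

# With the flag set, select_implementation() skips autotvm and simply picks
# the implementation with the highest plevel, matching the change above.
with tvm.transform.PassContext(
    opt_level=3, config={"relay.backend.use_auto_scheduler": True}
):
    lib = relay.build(mod, target="llvm", params=params)
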
2 changes: 1 addition & 1 deletion python/tvm/relay/op/strategy/cuda.py
@@ -107,7 +107,7 @@ def naive_schedule(_, outs, target):
         # For GPU, we at least need thread binding to make a valid schedule.
         # So the naive schedule cannot be compiled.
         raise RuntimeError(
-            "Cannot compile for GPU targets if no tuned schedule is found."
+            "Cannot compile for GPU targets if no tuned schedule is found. "
             "Please see the warning messages above for more information about the failed workloads."
         )
     return tvm.te.create_schedule(outs[-1].op)
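
The cuda.py change relies on the fact that adjacent Python string literals are concatenated into a single string, so the added trailing space is what keeps the two sentences of the error message from running together. A small illustration (not part of the commit):

# Adjacent string literals are merged at parse time; without the trailing
# space, "found." and "Please" would be joined as "found.Please see ...".
msg = (
    "Cannot compile for GPU targets if no tuned schedule is found. "
    "Please see the warning messages above for more information about the failed workloads."
)
print(msg)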
