[AutoScheduler] Fix task extraction #6965

Merged · 4 commits · Nov 24, 2020
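For context, this PR fixes auto_scheduler task extraction so that it no longer depends on autotvm during implementation selection. A minimal usage sketch of the extraction API (not taken from this PR; `mod` and `params` are assumed placeholders for an existing Relay module and its parameters, and "llvm" is only an example target):

    import tvm
    from tvm import auto_scheduler

    # `mod` and `params` are assumed to come from an earlier frontend import
    # (e.g., relay.frontend.from_onnx); "llvm" is only an example target.
    target = tvm.target.Target("llvm")
    tasks, task_weights = auto_scheduler.extract_tasks(mod["main"], params, target)
    for task in tasks:
        print(task.compute_dag)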
python/tvm/relay/backend/compile_engine.py (10 changes: 6 additions & 4 deletions)

@@ -186,6 +186,11 @@ def select_implementation(op, attrs, inputs, out_type, target, use_autotvm=True):
     all_impls = get_valid_implementations(op, attrs, inputs, out_type, target)
     best_plevel_impl = max(all_impls, key=lambda x: x.plevel)

+    # Disable autotvm if auto_scheduler is enabled
+    # (i.e., always return the implementation with the highest priority for auto_scheduler).
+    if PassContext.current().config.get("relay.backend.use_auto_scheduler", False):
+        use_autotvm = False
+
     # If not using autotvm, always return the implementation with the highest priority.
     if not use_autotvm:
         logger.info(
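With this branch in place, enabling auto_scheduler automatically turns autotvm off during implementation selection. A minimal sketch of how the flag is set from user code (assuming an existing Relay module `mod` with parameters `params`; "llvm" is only an example target):

    import tvm
    from tvm import relay

    # Compiling under this PassContext makes select_implementation() above
    # skip autotvm and return the highest-plevel implementation.
    with tvm.transform.PassContext(
        opt_level=3, config={"relay.backend.use_auto_scheduler": True}
    ):
        lib = relay.build(mod, target="llvm", params=params)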
@@ -288,10 +293,7 @@ def lower_call(call, inputs, target):
     env.tracing = False
     reenable_tracing = True

-    # check if auto_scheduler is enabled, and use plevel to select the implementation if so
-    use_auto_scheduler = PassContext.current().config.get("relay.backend.use_auto_scheduler", False)
-
-    if not is_dyn and not use_auto_scheduler:
+    if not is_dyn:
         best_impl, outputs = select_implementation(op, call.attrs, inputs, ret_type, target)
     else:
         # TODO(@icemelon9): Allow tvm to generate multiple kernels for dynamic shapes.
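The removed special case means every static-shape call now goes through select_implementation(), which handles the auto_scheduler path itself. The combined effect of the two hunks, as a simplified standalone sketch (choose_impl is a hypothetical helper; the real logic lives in select_implementation):

    # Simplified sketch of the post-PR dispatch logic (hypothetical helper).
    def choose_impl(all_impls, use_autotvm, use_auto_scheduler):
        if use_auto_scheduler:
            # auto_scheduler ignores autotvm tuning records entirely.
            use_autotvm = False
        if not use_autotvm:
            # Fall back to priority-level selection.
            return max(all_impls, key=lambda impl: impl.plevel)
        # ...otherwise query the autotvm tuning log for the best record...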
python/tvm/relay/op/strategy/cuda.py (2 changes: 1 addition & 1 deletion)

@@ -107,7 +107,7 @@ def naive_schedule(_, outs, target):
         # For GPU, we at least need thread binding to make a valid schedule.
         # So the naive schedule cannot be compiled.
         raise RuntimeError(
-            "Cannot compile for GPU targets if no tuned schedule is found."
+            "Cannot compile for GPU targets if no tuned schedule is found. "
            "Please see the warning messages above for more information about the failed workloads."
         )
     return tvm.te.create_schedule(outs[-1].op)
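The one-character fix above matters because adjacent Python string literals are concatenated with no separator, so the two sentences of the error message previously ran together. A quick demonstration:

    before = (
        "Cannot compile for GPU targets if no tuned schedule is found."
        "Please see the warning messages above for more information about the failed workloads."
    )
    after = (
        "Cannot compile for GPU targets if no tuned schedule is found. "
        "Please see the warning messages above for more information about the failed workloads."
    )
    print(before)  # ...is found.Please see...  (words run together)
    print(after)   # ...is found. Please see... (readable)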