OpenXLA-specific changes
Moerafaat committed Apr 30, 2024
1 parent 8d2ca1c commit 5441efa
Showing 25 changed files with 1,595 additions and 66 deletions.
935 changes: 935 additions & 0 deletions BUILD

Large diffs are not rendered by default.

@@ -68,6 +68,18 @@ class ElementwiseOpConversionBase : public ConvertOpToLLVMPattern<SourceOp> {
: ConvertOpToLLVMPattern<SourceOp>(typeConverter, benefit),
axisAnalysisPass(axisAnalysisPass) {}

// True if the elements allocated to a thread are contiguous within the axis.
// This is not the case in MMA-like encodings, where a thread might hold
// elements (0,0), (0,1) and (8,0), (8,1), for example. This matters because
// the deduplication mechanism assumes that, for example, with constancy=4 and
// elements/thread=4, all of a thread's elements are constant.
bool contiguouslyMapped(Attribute encoding) const {
if (auto slice = encoding.dyn_cast<triton::gpu::SliceEncodingAttr>()) {
return contiguouslyMapped(slice.getParent());
}
return encoding.isa<triton::gpu::BlockedEncodingAttr>();
}

// Try to deduplicate the resultVals based on the
// constancy properties of the result discovered by
// the axis analysis pass. If possible, redundant
@@ -93,8 +105,7 @@ class ElementwiseOpConversionBase : public ConvertOpToLLVMPattern<SourceOp> {
if (!encoding)
// encoding not available
return resultVals;
if (!encoding.dyn_cast<BlockedEncodingAttr>() &&
!encoding.dyn_cast<SliceEncodingAttr>()) {
if (!contiguouslyMapped(encoding)) {
// TODO: constraining the encoding type here is necessary to avoid
// crashes in the getElemsPerThread call below that happen in
// test_core::test_fp8_dot_acc
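To make the assumption behind the new contiguouslyMapped check concrete, here is a small illustrative sketch (not part of the commit; the names and the deduplication strategy are simplified). With a blocked, contiguous mapping, constancy=C along the axis means every group of C consecutive per-thread values is identical, so one value per group can be reused:

#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative only: reuse the first value of every `constancy`-sized group.
// This reuse is safe when a thread's elements are contiguous along the
// constant axis (blocked or slice-of-blocked encodings) and unsafe for
// MMA-like encodings, where a thread may hold elements
// (0,0), (0,1), (8,0), (8,1) and the third element starts a new group.
std::vector<int32_t> dedupPerThread(const std::vector<int32_t> &vals,
                                    size_t constancy) {
  std::vector<int32_t> out(vals.size());
  for (size_t i = 0; i < vals.size(); ++i)
    out[i] = vals[i - (i % constancy)];
  return out;
}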
3 changes: 2 additions & 1 deletion lib/Analysis/Utility.cpp
@@ -527,7 +527,8 @@ bool supportMMA(triton::DotOp op, int version) {
auto aElemTy = op.getA().getType().getElementType();
auto bElemTy = op.getB().getType().getElementType();
if (version == 3) {
if (triton::tools::getBoolEnv("DISABLE_MMA_V3"))
// TODO(b/311157761): enable mma_v3
if (!triton::tools::getBoolEnv("ENABLE_MMA_V3"))
return false;
auto retType = op.getType();
auto retShapePerCTA = getShapePerCTA(retType);
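Note that the gate is now opt-in rather than opt-out: MMA v3 lowering stays off unless ENABLE_MMA_V3 is set, whereas previously it was on unless DISABLE_MMA_V3 was set. A minimal sketch of opting in from a host process, assuming getBoolEnv simply reads the process environment:

#include <cstdlib>

int main() {
  // Hypothetical opt-in before driving the Triton passes; with the old flag,
  // MMA v3 was on by default and turned off via DISABLE_MMA_V3 instead.
  setenv("ENABLE_MMA_V3", "1", /*overwrite=*/1);
  // ... run compilation that eventually reaches supportMMA(op, /*version=*/3) ...
  return 0;
}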
3 changes: 2 additions & 1 deletion lib/Conversion/TritonGPUToLLVM/ElementwiseOpToLLVM.cpp
@@ -66,7 +66,8 @@ SmallVector<Value> reorderValues(const SmallVector<Value> &values, Type inType,
auto ouEltTy = ouTensorTy.getElementType();
if (inBitWidth == ouBitWidth)
return values;
if (inBitWidth == 16 && ouBitWidth == 32) {
if ((inBitWidth == 16 && ouBitWidth == 32) ||
(inBitWidth == 32 && ouBitWidth == 16)) {
SmallVector<Value> ret;
for (unsigned i = 0; i < values.size(); i += 8) {
ret.push_back(values[i]);
2 changes: 1 addition & 1 deletion lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp
@@ -23,7 +23,7 @@ using ttg::SliceEncodingAttr;
// Get the highest version supported for the hardware and the dot.
static int getMMAVersionSafe(int computeCapability, tt::DotOp op) {
int baseVersion = 0;
if (computeCapability < 75) {
if (computeCapability < 80) {
baseVersion = 1;
} else if (computeCapability < 90) {
baseVersion = 2;
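The lowered threshold means devices below sm_80 (notably Turing, sm_75, which previously fell into the version-2 branch) now start from MMA version 1. A condensed, hypothetical restatement of the selection ladder; only the two branches visible above come from the source, and the version-3 branch plus the interaction with supportMMA are assumed:

// Hypothetical condensation of getMMAVersionSafe, for illustration only.
static int pickBaseMmaVersion(int computeCapability) {
  if (computeCapability < 80)
    return 1;  // pre-Ampere, now including Turing (sm_75)
  if (computeCapability < 90)
    return 2;  // Ampere / Ada
  return 3;    // Hopper (assumed; additionally gated on ENABLE_MMA_V3)
}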
17 changes: 16 additions & 1 deletion lib/Dialect/TritonGPU/Transforms/OptimizeDotOperands.cpp
@@ -111,7 +111,8 @@ class HoistLayoutConversion : public OpRewritePattern<ConvertLayoutOp> {
PatternRewriter &rewriter) const override {
// Only consider conversions to dot operand.
auto cvtTy = cvt.getType().cast<RankedTensorType>();
if (!cvtTy.getEncoding().isa<DotOperandEncodingAttr>())
auto dotOpEnc = cvtTy.getEncoding().dyn_cast<DotOperandEncodingAttr>();
if (!dotOpEnc)
return failure();

auto src = cvt.getSrc().getDefiningOp();
@@ -126,6 +127,12 @@ class HoistLayoutConversion : public OpRewritePattern<ConvertLayoutOp> {
[](Type ty) { return ty.isa<RankedTensorType>(); }))
return failure();

// Quick fix for loading issues in the case where the original-bitwidth
// computation cannot see that there is a mixed-precision dot (hence
// kWidth = 1) but would still hoist through the type conversion.
if (isa<arith::ExtFOp>(src) && dotOpEnc.getKWidth() == 1)
return failure();

// Only consider custom conversions or arith ops.
// TODO(jlebar): Is this too restrictive?
if (!isa<FpToFpOp, BitcastOp>(src) && !isPureUnaryInlineAsm(src) &&
@@ -138,6 +145,14 @@ class HoistLayoutConversion : public OpRewritePattern<ConvertLayoutOp> {
if (isa<arith::TruncIOp, arith::TruncFOp, arith::SelectOp>(src))
return failure();

// Don't hoist through u1 -> fp casts as they aren't supported in
// ElementwiseOpToLLVM::reorderValues().
if (isa<arith::UIToFPOp>(src)) {
Type srcType = getElementTypeOrSelf(src->getOperand(0));
if (srcType.isInteger(1))
return failure();
}

// Check that the conversion is transitively dependent on a load, and all
// operations between the load and the conversion are layout preserving.
//
9 changes: 9 additions & 0 deletions lib/Dialect/TritonGPU/Transforms/Prefetch.cpp
@@ -178,6 +178,15 @@ LogicalResult Prefetcher::initialize() {
break;
if (!op->getResult(0).hasOneUse())
break;
// Similar to the issues handled by the HoistLayoutConversion pattern in
// OptimizeDotOperands.cpp, we can't propagate through type casts from
// predicates, as they aren't supported in Triton when encoded with the
// dot_op layout.
if (isa<arith::UIToFPOp>(op)) {
Type srcType = getElementTypeOrSelf(op->getOperand(0));
if (srcType.isInteger(1))
break;
}
rets.push_back(op->getOperand(0));
if (auto cvt = dyn_cast<triton::gpu::LocalLoadOp>(op)) {
foundConvertFromShared = true;
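The same i1-to-float check now appears both here and in OptimizeDotOperands.cpp above. A hypothetical shared helper expressing that condition (it is not part of the commit, which inlines the check in each pass; shown only to make the guarded pattern explicit):

#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/TypeUtilities.h"

// Hypothetical helper: true if `op` casts an i1 (predicate) value to a float
// type. Such casts cannot be handled once the value carries a dot_op layout,
// which is why both passes stop hoisting/propagating when they see one.
static bool isPredicateToFloatCast(mlir::Operation *op) {
  if (!llvm::isa<mlir::arith::UIToFPOp>(op))
    return false;
  mlir::Type srcTy = mlir::getElementTypeOrSelf(op->getOperand(0));
  return srcTy.isInteger(1);
}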
45 changes: 1 addition & 44 deletions lib/Dialect/TritonGPU/Transforms/RemoveLayoutConversions.cpp
@@ -170,11 +170,6 @@ class LayoutRematerialization {
ConvertLayoutOp convertOp);

private:
void updateRematMapping(SmallVector<std::tuple<Value, Value>> &values);
// Existing tuples of (value, layout) that need to be updated when recreating
// scf ops. This prevents keeping track of Values that have been deleted when
// rewriting slices.
DenseMap<Value, Attribute> mappedValues;
// map of the values remat based on encoding.
DenseMap<std::pair<Value, Attribute>, Value> rematMapping;
// DenseMap<std::pair<Operation*, Attribute>, Operation*>
@@ -186,7 +181,6 @@ void LayoutRematerialization::addRematValue(Value old, Attribute encoding,
Value newV) {
LDBG("addRematValue " << old << " encoding " << encoding << " " << newV);
rematMapping[{old, encoding}] = newV;
mappedValues[old] = encoding;
}

// Remove unneeded values now that we are done with the rematMapping.
@@ -802,44 +796,12 @@ bool canBeRemat(Operation *op) {
return true;
}

void LayoutRematerialization::updateRematMapping(
SmallVector<std::tuple<Value, Value>> &values) {
for (auto [old, newV] : values) {
auto it = mappedValues.find(old);
if (it != mappedValues.end()) {
Attribute encoding = it->second;
auto rematIt = rematMapping.find({old, it->second});
assert(rematIt != rematMapping.end());
Value replacedValue = rematIt->second;
rematMapping.erase(rematIt);
mappedValues.erase(it);
// Loop through the replacement value to find the new version of remat
// value. This should be okay as the number of values should be small.
for (auto [before, after] : values) {
if (before == replacedValue) {
replacedValue = after;
break;
}
}
rematMapping[{newV, encoding}] = replacedValue;
mappedValues[newV] = encoding;
}
}
}

void LayoutRematerialization::rewriteSlice(SetVector<Value> &slice,
DenseMap<Value, Attribute> &layout,
ConvertLayoutOp convertOp,
IRMapping &mapping) {
SetVector<Operation *> opsToRewrite;
for (Value v : slice) {
auto layoutIt = layout.find(v);
assert(layoutIt != layout.end());
// If we already have a remat value for this value, use it.
if (hasRematValue(v, layoutIt->second)) {
mapping.map(v, getRematValue(v, layoutIt->second));
continue;
}
if (v.getDefiningOp()) {
opsToRewrite.insert(v.getDefiningOp());
if (auto ifOp = v.getDefiningOp<scf::IfOp>()) {
@@ -917,8 +879,7 @@ void LayoutRematerialization::rewriteSlice(SetVector<Value> &slice,
if (slice.count(res)) {
// Why can't we use res instead of ifOp.getResult(oldIdx)?
mapping.map(ifOp.getResult(oldIdx), newIfOp.getResult(newIdx));
addRematValue(ifOp.getResult(oldIdx), layout[res],
newIfOp.getResult(newIdx));
addRematValue(res, layout[res], newIfOp.getResult(newIdx));
++newIdx;
}
++oldIdx;
@@ -947,8 +908,6 @@ void LayoutRematerialization::rewriteSlice(SetVector<Value> &slice,
auto cvt = builder.create<ConvertLayoutOp>(op->getLoc(), newType,
newOp->getResult(0));
mapping.map(op->getResult(0), cvt.getResult());
addRematValue(op->getResult(0), layout[op->getResult(0)],
cvt.getResult());
continue;
}
Operation *newOp = builder.clone(*op, mapping);
@@ -960,14 +919,12 @@
old.getType().cast<RankedTensorType>().getShape(),
old.getType().cast<RankedTensorType>().getElementType(), it->second);
newV.setType(newType);
addRematValue(old, it->second, newV);
}
}
// Check mapping and see if there are existing convertOps on the old Argument
convertOp.replaceAllUsesWith(mapping.lookup(convertOp.getSrc()));
opToDelete.insert(convertOp);

updateRematMapping(replacements);
for (auto &kv : replacements) {
builder.replaceAllUsesWith(std::get<0>(kv), std::get<1>(kv));
}
3 changes: 2 additions & 1 deletion lib/Dialect/TritonNvidiaGPU/Transforms/FenceInsertion.cpp
@@ -40,7 +40,8 @@ struct FenceInsertionPass
// Only insert fences for compute capability 9.0
if (computeCapability < 90)
return;
if (::triton::tools::getBoolEnv("DISABLE_MMA_V3"))
// TODO(b/311157761): enable mma_v3
if (!::triton::tools::getBoolEnv("ENABLE_MMA_V3"))
return;
ModuleOp mod = getOperation();
mod.walk([&](Operation *op) {
3 changes: 2 additions & 1 deletion lib/Target/LLVMIR/LLVMDIScope.cpp
@@ -90,7 +90,8 @@ struct LLVMDIScopePass : public LLVMDIScopeBase<LLVMDIScopePass> {
compileUnitAttr = LLVM::DICompileUnitAttr::get(
context, distinctId, llvm::dwarf::DW_LANG_C, fileAttr,
StringAttr::get(context, "triton"),
/*isOptimized=*/true, LLVM::DIEmissionKind::LineTablesOnly);
/*isOptimized=*/true, LLVM::DIEmissionKind::LineTablesOnly,
LLVM::DINameTableKind::Default);
}
subprogramFlags = subprogramFlags | LLVM::DISubprogramFlags::Definition;
} else {
91 changes: 91 additions & 0 deletions python/BUILD
@@ -0,0 +1,91 @@
# NOTE: Do not depend on any targets from this directory,
# but use //third_party/py/triton instead.

load("@pybind11_bazel//:build_defs.bzl", "pybind_extension")

package(
default_applicable_licenses = ["//:license"],
default_visibility = [
"//third_party/py/triton:__pkg__",
"@triton//python:__subpackages__",
],
)

cc_library(
name = "passes",
hdrs = ["src/passes.h"],
includes = ["src"],
visibility = ["@triton//third_party:__subpackages__"],
)

pybind_extension(
name = "libtriton",
srcs = [
"src/interpreter.cc",
"src/ir.cc",
"src/llvm.cc",
"src/main.cc",
"src/passes.cc",
],
copts = ["-DTRITON_BACKENDS_TUPLE=(nvidia)"],
deps = [
":passes",
"@llvm-project//llvm:Core",
"@llvm-project//llvm:IPO",
"@llvm-project//llvm:IRReader",
"@llvm-project//llvm:InstCombine",
"@llvm-project//llvm:Linker",
"@llvm-project//llvm:MC",
"@llvm-project//llvm:Passes",
"@llvm-project//llvm:Support",
"@llvm-project//llvm:Target",
"@llvm-project//mlir:BuiltinToLLVMIRTranslation",
"@llvm-project//mlir:BytecodeWriter",
"@llvm-project//mlir:ControlFlowDialect",
"@llvm-project//mlir:ConversionPasses",
"@llvm-project//mlir:IR",
"@llvm-project//mlir:IndexDialect",
"@llvm-project//mlir:LLVMDialect",
"@llvm-project//mlir:LLVMToLLVMIRTranslation",
"@llvm-project//mlir:NVVMToLLVMIRTranslation",
"@llvm-project//mlir:Parser",
"@llvm-project//mlir:Pass",
"@llvm-project//mlir:Support",
"@llvm-project//mlir:ToLLVMIRTranslation",
"@llvm-project//mlir:Transforms",
"//:TritonAnalysis",
"//:TritonDialects",
"//:TritonGPUToLLVM",
"//:TritonGPUTransforms",
"//:TritonHSACO",
"//:TritonLLVMIR",
"//:TritonNvidiaGPUTransforms",
"//:TritonPTX",
"//:TritonToTritonGPU",
"//:TritonTools",
"//:TritonTransforms",
"@triton//third_party/nvidia:triton_nvidia",
],
)

pybind_extension(
name = "triton_launcher",
srcs = [
"triton/compiler/triton_launcher.c",
],
tags = [
"config-cuda-only",
"requires-gpu-sm80",
],
deps = [
"@local_config_cuda//cuda:cuda_headers",
"@local_config_cuda//cuda:cuda_runtime",
],
)

filegroup(
name = "files",
srcs = glob(
include = ["triton/**/*.py"],
),
)
26 changes: 26 additions & 0 deletions python/test/regression/BUILD
@@ -0,0 +1,26 @@
load("//third_party/py/pytest:pytest_defs.bzl", "pytest_multi_tests")

package(
default_applicable_licenses = ["//:license"],
)

pytest_multi_tests(
name = "tests",
size = "large",
shard_count = 10,
tags = [
"config-cuda-only",
"requires-gpu-sm80",
],
tests = glob(
include = ["test_*.py"],
exclude = [
"test_performance.py", #TODO(b/321005767): fix failing test
"test_cast_matmul.py", #Broken upstream and disabled on OAI CI
],
),
deps = [
"//third_party/py/torch:pytorch",
"//third_party/py/triton",
],
)