From 7e013d6034bd8e81a6434f515f545b4375078512 Mon Sep 17 00:00:00 2001 From: Peter Klausler <35819229+klausler@users.noreply.github.com> Date: Wed, 13 Sep 2023 15:02:11 -0700 Subject: [PATCH 01/22] [flang] Accept intrinsic functions in DATA statement variables (#66229) Pure intrinsic functions are acceptable in constant expressions so long as their arguments are constant expressions. Allow them to appear in subscripts in DATA statement variables. Fixes https://github.com/llvm/llvm-project/issues/65046. --- flang/lib/Evaluate/check-expression.cpp | 11 +++++ flang/lib/Semantics/check-data.cpp | 56 +++++++++++++------------ flang/test/Semantics/data05.f90 | 4 ++ 3 files changed, 45 insertions(+), 26 deletions(-) diff --git a/flang/lib/Evaluate/check-expression.cpp b/flang/lib/Evaluate/check-expression.cpp index cfc67bf70dd0d..29bd6eaa466bb 100644 --- a/flang/lib/Evaluate/check-expression.cpp +++ b/flang/lib/Evaluate/check-expression.cpp @@ -114,6 +114,7 @@ bool IsConstantExprHelper::operator()( // LBOUND, UBOUND, and SIZE with truly constant DIM= arguments will have // been rewritten into DescriptorInquiry operations. if (const auto *intrinsic{std::get_if(&call.proc().u)}) { + const characteristics::Procedure &proc{intrinsic->characteristics.value()}; if (intrinsic->name == "kind" || intrinsic->name == IntrinsicProcTable::InvalidName || call.arguments().empty() || !call.arguments()[0]) { @@ -129,6 +130,16 @@ bool IsConstantExprHelper::operator()( } else if (intrinsic->name == "shape" || intrinsic->name == "size") { auto shape{GetShape(call.arguments()[0]->UnwrapExpr())}; return shape && IsConstantExprShape(*shape); + } else if (proc.IsPure()) { + for (const auto &arg : call.arguments()) { + if (!arg) { + return false; + } else if (const auto *expr{arg->UnwrapExpr()}; + !expr || !(*this)(*expr)) { + return false; + } + } + return true; } // TODO: STORAGE_SIZE } diff --git a/flang/lib/Semantics/check-data.cpp b/flang/lib/Semantics/check-data.cpp index 6916870907a63..72e021d03a969 100644 --- a/flang/lib/Semantics/check-data.cpp +++ b/flang/lib/Semantics/check-data.cpp @@ -102,16 +102,16 @@ class DataVarChecker : public evaluate::AllTraverse { lastSymbol.name().ToString()); return false; } - RestrictPointer(); + auto restorer{common::ScopedSet(isPointerAllowed_, false)}; + return (*this)(component.base()) && (*this)(lastSymbol); + } else if (IsPointer(lastSymbol)) { // C877 + context_.Say(source_, + "Data object must not contain pointer '%s' as a non-rightmost part"_err_en_US, + lastSymbol.name().ToString()); + return false; } else { - if (IsPointer(lastSymbol)) { // C877 - context_.Say(source_, - "Data object must not contain pointer '%s' as a non-rightmost part"_err_en_US, - lastSymbol.name().ToString()); - return false; - } + return (*this)(component.base()) && (*this)(lastSymbol); } - return (*this)(component.base()) && (*this)(lastSymbol); } bool operator()(const evaluate::ArrayRef &arrayRef) { hasSubscript_ = true; @@ -128,29 +128,32 @@ class DataVarChecker : public evaluate::AllTraverse { return false; } bool operator()(const evaluate::Subscript &subs) { - DataVarChecker subscriptChecker{context_, source_}; - subscriptChecker.RestrictPointer(); + auto restorer1{common::ScopedSet(isPointerAllowed_, false)}; + auto restorer2{common::ScopedSet(isFunctionAllowed_, true)}; return common::visit( - common::visitors{ - [&](const evaluate::IndirectSubscriptIntegerExpr &expr) { - return CheckSubscriptExpr(expr); - }, - [&](const evaluate::Triplet &triplet) { - return 
CheckSubscriptExpr(triplet.lower()) && - CheckSubscriptExpr(triplet.upper()) && - CheckSubscriptExpr(triplet.stride()); - }, - }, - subs.u) && - subscriptChecker(subs.u); + common::visitors{ + [&](const evaluate::IndirectSubscriptIntegerExpr &expr) { + return CheckSubscriptExpr(expr); + }, + [&](const evaluate::Triplet &triplet) { + return CheckSubscriptExpr(triplet.lower()) && + CheckSubscriptExpr(triplet.upper()) && + CheckSubscriptExpr(triplet.stride()); + }, + }, + subs.u); } template bool operator()(const evaluate::FunctionRef &) const { // C875 - context_.Say(source_, - "Data object variable must not be a function reference"_err_en_US); - return false; + if (isFunctionAllowed_) { + // Must have been validated as a constant expression + return true; + } else { + context_.Say(source_, + "Data object variable must not be a function reference"_err_en_US); + return false; + } } - void RestrictPointer() { isPointerAllowed_ = false; } private: bool CheckSubscriptExpr( @@ -178,6 +181,7 @@ class DataVarChecker : public evaluate::AllTraverse { bool hasSubscript_{false}; bool isPointerAllowed_{true}; bool isFirstSymbol_{true}; + bool isFunctionAllowed_{false}; }; static bool IsValidDataObject(const SomeExpr &expr) { // C878, C879 diff --git a/flang/test/Semantics/data05.f90 b/flang/test/Semantics/data05.f90 index 02bfd46632645..f9fc858c8d543 100644 --- a/flang/test/Semantics/data05.f90 +++ b/flang/test/Semantics/data05.f90 @@ -93,4 +93,8 @@ subroutine s13 integer j(2) data j(2:1), j(1:2) /1,2/ ! CHECK: j (InDataStmt) size=8 offset=0: ObjectEntity type: INTEGER(4) shape: 1_8:2_8 init:[INTEGER(4)::1_4,2_4] end subroutine + subroutine s14 + integer j(0:1) + data (j(modulo(k,2)),k=1,2) /3,4/ ! CHECK: j (InDataStmt) size=8 offset=0: ObjectEntity type: INTEGER(4) shape: 0_8:1_8 init:[INTEGER(4)::4_4,3_4] + end subroutine end module From f94695b6eb0e0b2bb059c33903cc7dd4a3ddd47f Mon Sep 17 00:00:00 2001 From: Jan Svoboda Date: Sat, 9 Sep 2023 18:59:51 -0700 Subject: [PATCH 02/22] [clang] NFCI: Use `FileEntryRef` in `FileManager::getBufferForFile()` --- clang/include/clang/Basic/FileManager.h | 2 +- clang/include/clang/Lex/HeaderMap.h | 3 +-- clang/lib/Basic/FileManager.cpp | 5 +++-- clang/lib/Lex/HeaderMap.cpp | 5 ++--- clang/lib/Serialization/ASTReader.cpp | 2 +- clang/lib/Serialization/ModuleManager.cpp | 2 +- clang/unittests/Lex/HeaderSearchTest.cpp | 2 +- 7 files changed, 10 insertions(+), 11 deletions(-) diff --git a/clang/include/clang/Basic/FileManager.h b/clang/include/clang/Basic/FileManager.h index 58ff42ee5c241..56c45e3cf68cd 100644 --- a/clang/include/clang/Basic/FileManager.h +++ b/clang/include/clang/Basic/FileManager.h @@ -275,7 +275,7 @@ class FileManager : public RefCountedBase { /// Open the specified file as a MemoryBuffer, returning a new /// MemoryBuffer if successful, otherwise returning null. llvm::ErrorOr> - getBufferForFile(const FileEntry *Entry, bool isVolatile = false, + getBufferForFile(FileEntryRef Entry, bool isVolatile = false, bool RequiresNullTerminator = true); llvm::ErrorOr> getBufferForFile(StringRef Filename, bool isVolatile = false, diff --git a/clang/include/clang/Lex/HeaderMap.h b/clang/include/clang/Lex/HeaderMap.h index de753cbfec3ce..9d88b36bfd8e4 100644 --- a/clang/include/clang/Lex/HeaderMap.h +++ b/clang/include/clang/Lex/HeaderMap.h @@ -88,8 +88,7 @@ class HeaderMap : private HeaderMapImpl { public: /// This attempts to load the specified file as a header map. If it doesn't /// look like a HeaderMap, it gives up and returns null. 
- static std::unique_ptr Create(const FileEntry *FE, - FileManager &FM); + static std::unique_ptr Create(FileEntryRef FE, FileManager &FM); using HeaderMapImpl::dump; using HeaderMapImpl::forEachKey; diff --git a/clang/lib/Basic/FileManager.cpp b/clang/lib/Basic/FileManager.cpp index c3eec80caaf30..30e2916c7f2a7 100644 --- a/clang/lib/Basic/FileManager.cpp +++ b/clang/lib/Basic/FileManager.cpp @@ -536,8 +536,9 @@ void FileManager::fillRealPathName(FileEntry *UFE, llvm::StringRef FileName) { } llvm::ErrorOr> -FileManager::getBufferForFile(const FileEntry *Entry, bool isVolatile, +FileManager::getBufferForFile(FileEntryRef FE, bool isVolatile, bool RequiresNullTerminator) { + const FileEntry *Entry = &FE.getFileEntry(); // If the content is living on the file entry, return a reference to it. if (Entry->Content) return llvm::MemoryBuffer::getMemBuffer(Entry->Content->getMemBufferRef()); @@ -548,7 +549,7 @@ FileManager::getBufferForFile(const FileEntry *Entry, bool isVolatile, if (isVolatile || Entry->isNamedPipe()) FileSize = -1; - StringRef Filename = Entry->getName(); + StringRef Filename = FE.getName(); // If the file is already open, use the open file descriptor. if (Entry->File) { auto Result = Entry->File->getBuffer(Filename, FileSize, diff --git a/clang/lib/Lex/HeaderMap.cpp b/clang/lib/Lex/HeaderMap.cpp index da0b8898f6900..2b26426735860 100644 --- a/clang/lib/Lex/HeaderMap.cpp +++ b/clang/lib/Lex/HeaderMap.cpp @@ -48,10 +48,9 @@ static inline unsigned HashHMapKey(StringRef Str) { /// map. If it doesn't look like a HeaderMap, it gives up and returns null. /// If it looks like a HeaderMap but is obviously corrupted, it puts a reason /// into the string error argument and returns null. -std::unique_ptr HeaderMap::Create(const FileEntry *FE, - FileManager &FM) { +std::unique_ptr HeaderMap::Create(FileEntryRef FE, FileManager &FM) { // If the file is too small to be a header map, ignore it. - unsigned FileSize = FE->getSize(); + unsigned FileSize = FE.getSize(); if (FileSize <= sizeof(HMapHeader)) return nullptr; auto FileBuffer = FM.getBufferForFile(FE); diff --git a/clang/lib/Serialization/ASTReader.cpp b/clang/lib/Serialization/ASTReader.cpp index 2289edc511e96..6842c0da79c44 100644 --- a/clang/lib/Serialization/ASTReader.cpp +++ b/clang/lib/Serialization/ASTReader.cpp @@ -2501,7 +2501,7 @@ InputFile ASTReader::getInputFile(ModuleFile &F, unsigned ID, bool Complain) { // accept the cached file as legit. if (ValidateASTInputFilesContent && StoredContentHash != static_cast(llvm::hash_code(-1))) { - auto MemBuffOrError = FileMgr.getBufferForFile(File); + auto MemBuffOrError = FileMgr.getBufferForFile(*File); if (!MemBuffOrError) { if (!Complain) return MTimeChange; diff --git a/clang/lib/Serialization/ModuleManager.cpp b/clang/lib/Serialization/ModuleManager.cpp index 112d27e179dc3..de4cd3d05853a 100644 --- a/clang/lib/Serialization/ModuleManager.cpp +++ b/clang/lib/Serialization/ModuleManager.cpp @@ -209,7 +209,7 @@ ModuleManager::addModule(StringRef FileName, ModuleKind Type, // // RequiresNullTerminator is false because module files don't need it, and // this allows the file to still be mmapped. 
- Buf = FileMgr.getBufferForFile(NewModule->File, + Buf = FileMgr.getBufferForFile(*NewModule->File, /*IsVolatile=*/true, /*RequiresNullTerminator=*/false); } diff --git a/clang/unittests/Lex/HeaderSearchTest.cpp b/clang/unittests/Lex/HeaderSearchTest.cpp index cc30b0a4304ff..c578fa72c859e 100644 --- a/clang/unittests/Lex/HeaderSearchTest.cpp +++ b/clang/unittests/Lex/HeaderSearchTest.cpp @@ -67,7 +67,7 @@ class HeaderSearchTest : public ::testing::Test { VFS->addFile(Filename, 0, std::move(Buf), /*User=*/std::nullopt, /*Group=*/std::nullopt, llvm::sys::fs::file_type::regular_file); - auto FE = FileMgr.getFile(Filename, true); + auto FE = FileMgr.getOptionalFileRef(Filename, true); assert(FE); // Test class supports only one HMap at a time. From c46a04339a7edd2b573b7a9796426aff6d154266 Mon Sep 17 00:00:00 2001 From: Daniil Dudkin <39276703+unterumarmung@users.noreply.github.com> Date: Thu, 14 Sep 2023 01:09:37 +0300 Subject: [PATCH 03/22] =?UTF-8?q?[mlir][arith]=20Rename=20`AtomicRMWKind`'?= =?UTF-8?q?s=20`maxf`=20=E2=86=92=20`maximumf`,=20`minf`=20=E2=86=92=20`mi?= =?UTF-8?q?nimumf`=20(#66135)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch is part of a larger initiative aimed at fixing floating-point `max` and `min` operations in MLIR: https://discourse.llvm.org/t/rfc-fix-floating-point-max-and-min-operations-in-mlir/72671. This commit renames `maxf` and `minf` enumerators of `AtomicRMWKind` to better reflect the current naming scheme and the goals of the RFC. --- .../mlir/Dialect/Arith/IR/ArithBase.td | 30 +++++++++---------- .../Conversion/MemRefToLLVM/MemRefToLLVM.cpp | 4 +-- .../Affine/Analysis/AffineAnalysis.cpp | 6 ++-- mlir/lib/Dialect/Arith/IR/ArithOps.cpp | 12 ++++---- mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp | 6 ++-- mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp | 4 +-- .../Dialect/MemRef/Transforms/ExpandOps.cpp | 10 +++---- mlir/lib/Dialect/Vector/IR/VectorOps.cpp | 4 +-- mlir/test/Dialect/Affine/invalid.mlir | 2 +- mlir/test/Dialect/Affine/ops.mlir | 4 +-- mlir/test/Dialect/MemRef/expand-ops.mlir | 2 +- 11 files changed, 43 insertions(+), 41 deletions(-) diff --git a/mlir/include/mlir/Dialect/Arith/IR/ArithBase.td b/mlir/include/mlir/Dialect/Arith/IR/ArithBase.td index 78fd7bdf012f8..a833e9c8220af 100644 --- a/mlir/include/mlir/Dialect/Arith/IR/ArithBase.td +++ b/mlir/include/mlir/Dialect/Arith/IR/ArithBase.td @@ -69,25 +69,25 @@ def Arith_CmpIPredicateAttr : I64EnumAttr< let cppNamespace = "::mlir::arith"; } -def ATOMIC_RMW_KIND_ADDF : I64EnumAttrCase<"addf", 0>; -def ATOMIC_RMW_KIND_ADDI : I64EnumAttrCase<"addi", 1>; -def ATOMIC_RMW_KIND_ASSIGN : I64EnumAttrCase<"assign", 2>; -def ATOMIC_RMW_KIND_MAXF : I64EnumAttrCase<"maxf", 3>; -def ATOMIC_RMW_KIND_MAXS : I64EnumAttrCase<"maxs", 4>; -def ATOMIC_RMW_KIND_MAXU : I64EnumAttrCase<"maxu", 5>; -def ATOMIC_RMW_KIND_MINF : I64EnumAttrCase<"minf", 6>; -def ATOMIC_RMW_KIND_MINS : I64EnumAttrCase<"mins", 7>; -def ATOMIC_RMW_KIND_MINU : I64EnumAttrCase<"minu", 8>; -def ATOMIC_RMW_KIND_MULF : I64EnumAttrCase<"mulf", 9>; -def ATOMIC_RMW_KIND_MULI : I64EnumAttrCase<"muli", 10>; -def ATOMIC_RMW_KIND_ORI : I64EnumAttrCase<"ori", 11>; -def ATOMIC_RMW_KIND_ANDI : I64EnumAttrCase<"andi", 12>; +def ATOMIC_RMW_KIND_ADDF : I64EnumAttrCase<"addf", 0>; +def ATOMIC_RMW_KIND_ADDI : I64EnumAttrCase<"addi", 1>; +def ATOMIC_RMW_KIND_ASSIGN : I64EnumAttrCase<"assign", 2>; +def ATOMIC_RMW_KIND_MAXIMUMF : I64EnumAttrCase<"maximumf", 3>; +def ATOMIC_RMW_KIND_MAXS : I64EnumAttrCase<"maxs", 4>; +def 
ATOMIC_RMW_KIND_MAXU : I64EnumAttrCase<"maxu", 5>; +def ATOMIC_RMW_KIND_MINIMUMF : I64EnumAttrCase<"minimumf", 6>; +def ATOMIC_RMW_KIND_MINS : I64EnumAttrCase<"mins", 7>; +def ATOMIC_RMW_KIND_MINU : I64EnumAttrCase<"minu", 8>; +def ATOMIC_RMW_KIND_MULF : I64EnumAttrCase<"mulf", 9>; +def ATOMIC_RMW_KIND_MULI : I64EnumAttrCase<"muli", 10>; +def ATOMIC_RMW_KIND_ORI : I64EnumAttrCase<"ori", 11>; +def ATOMIC_RMW_KIND_ANDI : I64EnumAttrCase<"andi", 12>; def AtomicRMWKindAttr : I64EnumAttr< "AtomicRMWKind", "", [ATOMIC_RMW_KIND_ADDF, ATOMIC_RMW_KIND_ADDI, ATOMIC_RMW_KIND_ASSIGN, - ATOMIC_RMW_KIND_MAXF, ATOMIC_RMW_KIND_MAXS, ATOMIC_RMW_KIND_MAXU, - ATOMIC_RMW_KIND_MINF, ATOMIC_RMW_KIND_MINS, ATOMIC_RMW_KIND_MINU, + ATOMIC_RMW_KIND_MAXIMUMF, ATOMIC_RMW_KIND_MAXS, ATOMIC_RMW_KIND_MAXU, + ATOMIC_RMW_KIND_MINIMUMF, ATOMIC_RMW_KIND_MINS, ATOMIC_RMW_KIND_MINU, ATOMIC_RMW_KIND_MULF, ATOMIC_RMW_KIND_MULI, ATOMIC_RMW_KIND_ORI, ATOMIC_RMW_KIND_ANDI]> { let cppNamespace = "::mlir::arith"; diff --git a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp index 516a6b8ed88e6..97faefe2cd4d6 100644 --- a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp +++ b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp @@ -1594,13 +1594,13 @@ matchSimpleAtomicOp(memref::AtomicRMWOp atomicOp) { return LLVM::AtomicBinOp::add; case arith::AtomicRMWKind::assign: return LLVM::AtomicBinOp::xchg; - case arith::AtomicRMWKind::maxf: + case arith::AtomicRMWKind::maximumf: return LLVM::AtomicBinOp::fmax; case arith::AtomicRMWKind::maxs: return LLVM::AtomicBinOp::max; case arith::AtomicRMWKind::maxu: return LLVM::AtomicBinOp::umax; - case arith::AtomicRMWKind::minf: + case arith::AtomicRMWKind::minimumf: return LLVM::AtomicBinOp::fmin; case arith::AtomicRMWKind::mins: return LLVM::AtomicBinOp::min; diff --git a/mlir/lib/Dialect/Affine/Analysis/AffineAnalysis.cpp b/mlir/lib/Dialect/Affine/Analysis/AffineAnalysis.cpp index ab1dfbdb419b8..1ba0bc8b6bfbe 100644 --- a/mlir/lib/Dialect/Affine/Analysis/AffineAnalysis.cpp +++ b/mlir/lib/Dialect/Affine/Analysis/AffineAnalysis.cpp @@ -60,8 +60,10 @@ static Value getSupportedReduction(AffineForOp forOp, unsigned pos, .Case([](arith::AndIOp) { return arith::AtomicRMWKind::andi; }) .Case([](arith::OrIOp) { return arith::AtomicRMWKind::ori; }) .Case([](arith::MulIOp) { return arith::AtomicRMWKind::muli; }) - .Case([](arith::MinimumFOp) { return arith::AtomicRMWKind::minf; }) - .Case([](arith::MaximumFOp) { return arith::AtomicRMWKind::maxf; }) + .Case( + [](arith::MinimumFOp) { return arith::AtomicRMWKind::minimumf; }) + .Case( + [](arith::MaximumFOp) { return arith::AtomicRMWKind::maximumf; }) .Case([](arith::MinSIOp) { return arith::AtomicRMWKind::mins; }) .Case([](arith::MaxSIOp) { return arith::AtomicRMWKind::maxs; }) .Case([](arith::MinUIOp) { return arith::AtomicRMWKind::minu; }) diff --git a/mlir/lib/Dialect/Arith/IR/ArithOps.cpp b/mlir/lib/Dialect/Arith/IR/ArithOps.cpp index 77bf8a438d6db..1e34ac598860f 100644 --- a/mlir/lib/Dialect/Arith/IR/ArithOps.cpp +++ b/mlir/lib/Dialect/Arith/IR/ArithOps.cpp @@ -2369,7 +2369,7 @@ TypedAttr mlir::arith::getIdentityValueAttr(AtomicRMWKind kind, Type resultType, OpBuilder &builder, Location loc, bool useOnlyFiniteValue) { switch (kind) { - case AtomicRMWKind::maxf: { + case AtomicRMWKind::maximumf: { const llvm::fltSemantics &semantic = llvm::cast(resultType).getFloatSemantics(); APFloat identity = useOnlyFiniteValue @@ -2390,7 +2390,7 @@ TypedAttr mlir::arith::getIdentityValueAttr(AtomicRMWKind kind, Type 
resultType, return builder.getIntegerAttr( resultType, APInt::getSignedMinValue( llvm::cast(resultType).getWidth())); - case AtomicRMWKind::minf: { + case AtomicRMWKind::minimumf: { const llvm::fltSemantics &semantic = llvm::cast(resultType).getFloatSemantics(); APFloat identity = useOnlyFiniteValue @@ -2426,8 +2426,8 @@ std::optional mlir::arith::getNeutralElement(Operation *op) { // Floating-point operations. .Case([](arith::AddFOp op) { return AtomicRMWKind::addf; }) .Case([](arith::MulFOp op) { return AtomicRMWKind::mulf; }) - .Case([](arith::MaximumFOp op) { return AtomicRMWKind::maxf; }) - .Case([](arith::MinimumFOp op) { return AtomicRMWKind::minf; }) + .Case([](arith::MaximumFOp op) { return AtomicRMWKind::maximumf; }) + .Case([](arith::MinimumFOp op) { return AtomicRMWKind::minimumf; }) // Integer operations. .Case([](arith::AddIOp op) { return AtomicRMWKind::addi; }) .Case([](arith::OrIOp op) { return AtomicRMWKind::ori; }) @@ -2482,9 +2482,9 @@ Value mlir::arith::getReductionOp(AtomicRMWKind op, OpBuilder &builder, return builder.create(loc, lhs, rhs); case AtomicRMWKind::muli: return builder.create(loc, lhs, rhs); - case AtomicRMWKind::maxf: + case AtomicRMWKind::maximumf: return builder.create(loc, lhs, rhs); - case AtomicRMWKind::minf: + case AtomicRMWKind::minimumf: return builder.create(loc, lhs, rhs); case AtomicRMWKind::maxs: return builder.create(loc, lhs, rhs); diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp index d26e68cb47ac1..f87aa4559e10a 100644 --- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp +++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp @@ -2549,9 +2549,9 @@ FailureOr> SoftmaxOp::decomposeOperation(OpBuilder &b) { dims.erase(dims.begin() + reductionDim); // Step 1: Compute max along dim. Value outputReduce = b.create(loc, dims, elementType); - Value neutralForMaxF = - arith::getIdentityValue(arith::AtomicRMWKind::maxf, elementType, b, loc, - /*useOnlyFiniteValue=*/true); + Value neutralForMaxF = arith::getIdentityValue(arith::AtomicRMWKind::maximumf, + elementType, b, loc, + /*useOnlyFiniteValue=*/true); Value neutralForMaxFInit = b.create(loc, Value{neutralForMaxF}, outputReduce) .result(); diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp index 42da47a5381e7..215a8f5e7d18b 100644 --- a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp +++ b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp @@ -3402,8 +3402,8 @@ LogicalResult AtomicRMWOp::verify() { "expects the number of subscripts to be equal to memref rank"); switch (getKind()) { case arith::AtomicRMWKind::addf: - case arith::AtomicRMWKind::maxf: - case arith::AtomicRMWKind::minf: + case arith::AtomicRMWKind::maximumf: + case arith::AtomicRMWKind::minimumf: case arith::AtomicRMWKind::mulf: if (!llvm::isa(getValue().getType())) return emitOpError() << "with kind '" diff --git a/mlir/lib/Dialect/MemRef/Transforms/ExpandOps.cpp b/mlir/lib/Dialect/MemRef/Transforms/ExpandOps.cpp index 8a276ebbff6a9..7c3ca19b789c7 100644 --- a/mlir/lib/Dialect/MemRef/Transforms/ExpandOps.cpp +++ b/mlir/lib/Dialect/MemRef/Transforms/ExpandOps.cpp @@ -36,7 +36,7 @@ namespace { /// AtomicRMWOpLowering pattern, e.g. with "minf" or "maxf" attributes, to /// `memref.generic_atomic_rmw` with the expanded code. 
/// -/// %x = atomic_rmw "maxf" %fval, %F[%i] : (f32, memref<10xf32>) -> f32 +/// %x = atomic_rmw "maximumf" %fval, %F[%i] : (f32, memref<10xf32>) -> f32 /// /// will be lowered to /// @@ -54,10 +54,10 @@ struct AtomicRMWOpConverter : public OpRewritePattern { PatternRewriter &rewriter) const final { arith::CmpFPredicate predicate; switch (op.getKind()) { - case arith::AtomicRMWKind::maxf: + case arith::AtomicRMWKind::maximumf: predicate = arith::CmpFPredicate::OGT; break; - case arith::AtomicRMWKind::minf: + case arith::AtomicRMWKind::minimumf: predicate = arith::CmpFPredicate::OLT; break; default: @@ -137,8 +137,8 @@ struct ExpandOpsPass : public memref::impl::ExpandOpsBase { target.addLegalDialect(); target.addDynamicallyLegalOp( [](memref::AtomicRMWOp op) { - return op.getKind() != arith::AtomicRMWKind::maxf && - op.getKind() != arith::AtomicRMWKind::minf; + return op.getKind() != arith::AtomicRMWKind::maximumf && + op.getKind() != arith::AtomicRMWKind::minimumf; }); target.addDynamicallyLegalOp([](memref::ReshapeOp op) { return !cast(op.getShape().getType()).hasStaticShape(); diff --git a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp index 9422936bf21e3..11aa76798bcaa 100644 --- a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp +++ b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp @@ -493,7 +493,7 @@ Value mlir::vector::getVectorReductionOp(arith::AtomicRMWKind op, case arith::AtomicRMWKind::muli: return builder.create(vector.getLoc(), CombiningKind::MUL, vector); - case arith::AtomicRMWKind::minf: + case arith::AtomicRMWKind::minimumf: return builder.create(vector.getLoc(), CombiningKind::MINF, vector); case arith::AtomicRMWKind::mins: @@ -502,7 +502,7 @@ Value mlir::vector::getVectorReductionOp(arith::AtomicRMWKind op, case arith::AtomicRMWKind::minu: return builder.create(vector.getLoc(), CombiningKind::MINUI, vector); - case arith::AtomicRMWKind::maxf: + case arith::AtomicRMWKind::maximumf: return builder.create(vector.getLoc(), CombiningKind::MAXF, vector); case arith::AtomicRMWKind::maxs: diff --git a/mlir/test/Dialect/Affine/invalid.mlir b/mlir/test/Dialect/Affine/invalid.mlir index fd7d7df9d8735..1dc3451ed7db8 100644 --- a/mlir/test/Dialect/Affine/invalid.mlir +++ b/mlir/test/Dialect/Affine/invalid.mlir @@ -287,7 +287,7 @@ func.func @affine_parallel(%arg0 : index, %arg1 : index, %arg2 : index) { func.func @affine_parallel(%arg0 : index, %arg1 : index, %arg2 : index) { %0 = memref.alloc() : memref<100x100xi32> - %1 = affine.parallel (%i, %j) = (0, 0) to (100, 100) step (10, 10) reduce ("minf") -> (f32) { + %1 = affine.parallel (%i, %j) = (0, 0) to (100, 100) step (10, 10) reduce ("minimumf") -> (f32) { %2 = affine.load %0[%i, %j] : memref<100x100xi32> // expected-error@+1 {{types mismatch between yield op and its parent}} affine.yield %2 : i32 diff --git a/mlir/test/Dialect/Affine/ops.mlir b/mlir/test/Dialect/Affine/ops.mlir index f55d59a3e6470..1063f2a7ecba4 100644 --- a/mlir/test/Dialect/Affine/ops.mlir +++ b/mlir/test/Dialect/Affine/ops.mlir @@ -158,8 +158,8 @@ func.func @valid_symbol_affine_scope(%n : index, %A : memref) { func.func @parallel(%A : memref<100x100xf32>, %N : index) { // CHECK: affine.parallel (%[[I0:.*]], %[[J0:.*]]) = (0, 0) to (symbol(%[[N]]), 100) step (10, 10) affine.parallel (%i0, %j0) = (0, 0) to (symbol(%N), 100) step (10, 10) { - // CHECK: affine.parallel (%{{.*}}, %{{.*}}) = (%[[I0]], %[[J0]]) to (%[[I0]] + 10, %[[J0]] + 10) reduce ("minf", "maxf") -> (f32, f32) - %0:2 = affine.parallel (%i1, %j1) = (%i0, %j0) to (%i0 + 10, 
%j0 + 10) reduce ("minf", "maxf") -> (f32, f32) { + // CHECK: affine.parallel (%{{.*}}, %{{.*}}) = (%[[I0]], %[[J0]]) to (%[[I0]] + 10, %[[J0]] + 10) reduce ("minimumf", "maximumf") -> (f32, f32) + %0:2 = affine.parallel (%i1, %j1) = (%i0, %j0) to (%i0 + 10, %j0 + 10) reduce ("minimumf", "maximumf") -> (f32, f32) { %2 = affine.load %A[%i0 + %i0, %j0 + %j1] : memref<100x100xf32> affine.yield %2, %2 : f32, f32 } diff --git a/mlir/test/Dialect/MemRef/expand-ops.mlir b/mlir/test/Dialect/MemRef/expand-ops.mlir index a0d8e52d6e7e2..3234b35e99dcd 100644 --- a/mlir/test/Dialect/MemRef/expand-ops.mlir +++ b/mlir/test/Dialect/MemRef/expand-ops.mlir @@ -3,7 +3,7 @@ // CHECK-LABEL: func @atomic_rmw_to_generic // CHECK-SAME: ([[F:%.*]]: memref<10xf32>, [[f:%.*]]: f32, [[i:%.*]]: index) func.func @atomic_rmw_to_generic(%F: memref<10xf32>, %f: f32, %i: index) -> f32 { - %x = memref.atomic_rmw maxf %f, %F[%i] : (f32, memref<10xf32>) -> f32 + %x = memref.atomic_rmw maximumf %f, %F[%i] : (f32, memref<10xf32>) -> f32 return %x : f32 } // CHECK: %0 = memref.generic_atomic_rmw %arg0[%arg2] : memref<10xf32> { From 9918d2556c991699e8efb2f998ac7908af9d72e6 Mon Sep 17 00:00:00 2001 From: Aart Bik <39774503+aartbik@users.noreply.github.com> Date: Wed, 13 Sep 2023 15:11:35 -0700 Subject: [PATCH 04/22] [mlir][sparse] remove sparse output python example (#66298) Rationale: This was actually just a pure "string based" test with very little actual python usage. The output sparse tensor was handled via the deprecated convertFromMLIRSparseTensor method. --- .../test_elementwise_add_sparse_output.py | 108 ------------------ .../python/tools/np_to_sparse_tensor.py | 81 ------------- 2 files changed, 189 deletions(-) delete mode 100644 mlir/test/Integration/Dialect/SparseTensor/python/test_elementwise_add_sparse_output.py delete mode 100644 mlir/test/Integration/Dialect/SparseTensor/python/tools/np_to_sparse_tensor.py diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_elementwise_add_sparse_output.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_elementwise_add_sparse_output.py deleted file mode 100644 index a41bde1ee2d34..0000000000000 --- a/mlir/test/Integration/Dialect/SparseTensor/python/test_elementwise_add_sparse_output.py +++ /dev/null @@ -1,108 +0,0 @@ -# RUN: env SUPPORT_LIB=%mlir_c_runner_utils %PYTHON %s | FileCheck %s - -import ctypes -import numpy as np -import os -import sys - -from mlir import ir -from mlir import runtime as rt -from mlir.dialects import sparse_tensor as st -from mlir.dialects import builtin -from mlir.dialects.linalg.opdsl import lang as dsl - -_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__)) -sys.path.append(_SCRIPT_PATH) -from tools import np_to_sparse_tensor as test_tools -from tools import sparse_compiler - -# TODO: Use linalg_structured_op to generate the kernel after making it to -# handle sparse tensor outputs. 
-_KERNEL_STR = """ -#DCSR = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed" ] -}> - -#trait_add_elt = { - indexing_maps = [ - affine_map<(i,j) -> (i,j)>, // A - affine_map<(i,j) -> (i,j)>, // B - affine_map<(i,j) -> (i,j)> // X (out) - ], - iterator_types = ["parallel", "parallel"], - doc = "X(i,j) = A(i,j) + B(i,j)" -} - -func.func @sparse_add_elt( - %arga: tensor<3x4xf64, #DCSR>, %argb: tensor<3x4xf64, #DCSR>) -> tensor<3x4xf64, #DCSR> { - %argx = bufferization.alloc_tensor() : tensor<3x4xf64, #DCSR> - %0 = linalg.generic #trait_add_elt - ins(%arga, %argb: tensor<3x4xf64, #DCSR>, tensor<3x4xf64, #DCSR>) - outs(%argx: tensor<3x4xf64, #DCSR>) { - ^bb(%a: f64, %b: f64, %x: f64): - %1 = arith.addf %a, %b : f64 - linalg.yield %1 : f64 - } -> tensor<3x4xf64, #DCSR> - return %0 : tensor<3x4xf64, #DCSR> -} - -func.func @main(%ad: tensor<3x4xf64>, %bd: tensor<3x4xf64>) -> tensor<3x4xf64, #DCSR> - attributes { llvm.emit_c_interface } { - %a = sparse_tensor.convert %ad : tensor<3x4xf64> to tensor<3x4xf64, #DCSR> - %b = sparse_tensor.convert %bd : tensor<3x4xf64> to tensor<3x4xf64, #DCSR> - %0 = call @sparse_add_elt(%a, %b) : (tensor<3x4xf64, #DCSR>, tensor<3x4xf64, #DCSR>) -> tensor<3x4xf64, #DCSR> - return %0 : tensor<3x4xf64, #DCSR> -} -""" - - -def _run_test(support_lib, kernel): - """Compiles, runs and checks results.""" - compiler = sparse_compiler.SparseCompiler( - options="", opt_level=2, shared_libs=[support_lib] - ) - module = ir.Module.parse(kernel) - engine = compiler.compile_and_jit(module) - - # Set up numpy inputs and buffer for output. - a = np.array( - [[1.1, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 6.6, 0.0]], np.float64 - ) - b = np.array( - [[1.1, 0.0, 0.0, 2.8], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], np.float64 - ) - - mem_a = ctypes.pointer(ctypes.pointer(rt.get_ranked_memref_descriptor(a))) - mem_b = ctypes.pointer(ctypes.pointer(rt.get_ranked_memref_descriptor(b))) - - # The sparse tensor output is a pointer to pointer of char. - out = ctypes.c_char(0) - mem_out = ctypes.pointer(ctypes.pointer(out)) - - # Invoke the kernel. - engine.invoke("main", mem_a, mem_b, mem_out) - - # Retrieve and check the result. - rank, nse, shape, values, indices = test_tools.sparse_tensor_to_coo_tensor( - support_lib, mem_out[0], np.float64 - ) - - # CHECK: PASSED - if np.allclose(values, [2.2, 2.8, 6.6]) and np.allclose( - indices, [[0, 0], [0, 3], [2, 2]] - ): - print("PASSED") - else: - quit("FAILURE") - - -def test_elementwise_add(): - # Obtain path to runtime support library. - support_lib = os.getenv("SUPPORT_LIB") - assert support_lib is not None, "SUPPORT_LIB is undefined" - assert os.path.exists(support_lib), f"{support_lib} does not exist" - with ir.Context() as ctx, ir.Location.unknown(): - _run_test(support_lib, _KERNEL_STR) - - -test_elementwise_add() diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/tools/np_to_sparse_tensor.py b/mlir/test/Integration/Dialect/SparseTensor/python/tools/np_to_sparse_tensor.py deleted file mode 100644 index 785d42cadbbe9..0000000000000 --- a/mlir/test/Integration/Dialect/SparseTensor/python/tools/np_to_sparse_tensor.py +++ /dev/null @@ -1,81 +0,0 @@ -# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - -# This file contains functions to process sparse tensor outputs. 
- -import ctypes -import functools -import numpy as np - - -@functools.lru_cache() -def _get_c_shared_lib(lib_name: str): - """Loads and returns the requested C shared library. - - Args: - lib_name: A string representing the C shared library. - - Returns: - The C shared library. - - Raises: - OSError: If there is any problem in loading the shared library. - ValueError: If the shared library doesn't contain the needed routine. - """ - # This raises OSError exception if there is any problem in loading the shared - # library. - c_lib = ctypes.CDLL(lib_name) - - try: - c_lib.convertFromMLIRSparseTensorF64.restype = ctypes.c_void_p - except Exception as e: - raise ValueError( - "Missing function convertFromMLIRSparseTensorF64 from " - f"the C shared library: {e} " - ) from e - - return c_lib - - -def sparse_tensor_to_coo_tensor(support_lib, sparse, dtype): - """Converts a sparse tensor to COO-flavored format. - - Args: - support_lib: A string for the supporting C shared library. - sparse: A ctypes.pointer to the sparse tensor descriptor. - dtype: The numpy data type for the tensor elements. - - Returns: - A tuple that contains the following values: - rank: An integer for the rank of the tensor. - nse: An integer for the number of non-zero values in the tensor. - shape: A 1D numpy array of integers, for the shape of the tensor. - values: A 1D numpy array, for the non-zero values in the tensor. - indices: A 2D numpy array of integers, representing the indices for the - non-zero values in the tensor. - - Raises: - OSError: If there is any problem in loading the shared library. - ValueError: If the shared library doesn't contain the needed routine. - """ - c_lib = _get_c_shared_lib(support_lib) - - rank = ctypes.c_ulonglong(0) - nse = ctypes.c_ulonglong(0) - shape = ctypes.POINTER(ctypes.c_ulonglong)() - values = ctypes.POINTER(np.ctypeslib.as_ctypes_type(dtype))() - indices = ctypes.POINTER(ctypes.c_ulonglong)() - c_lib.convertFromMLIRSparseTensorF64( - sparse, - ctypes.byref(rank), - ctypes.byref(nse), - ctypes.byref(shape), - ctypes.byref(values), - ctypes.byref(indices), - ) - # Convert the returned values to the corresponding numpy types. - shape = np.ctypeslib.as_array(shape, shape=[rank.value]) - values = np.ctypeslib.as_array(values, shape=[nse.value]) - indices = np.ctypeslib.as_array(indices, shape=[nse.value, rank.value]) - return rank, nse, shape, values, indices From f5592f3069842d53f30b40f6e3cb8cdee560828f Mon Sep 17 00:00:00 2001 From: Peter Klausler <35819229+klausler@users.noreply.github.com> Date: Wed, 13 Sep 2023 15:25:48 -0700 Subject: [PATCH 05/22] [flang][runtime] Handle type code synonyms in CFI_... runtime (#66231) Some CFI_type_... type codes are synonyms; ensure that they are treated as equivalent when validating inputs to CFI_... runtime routines. --- flang/include/flang/Runtime/type-code.h | 15 ++++++++++++--- flang/runtime/ISO_Fortran_binding.cpp | 10 ++++++---- 2 files changed, 18 insertions(+), 7 deletions(-) diff --git a/flang/include/flang/Runtime/type-code.h b/flang/include/flang/Runtime/type-code.h index df38611ab8760..fb18dba54980f 100644 --- a/flang/include/flang/Runtime/type-code.h +++ b/flang/include/flang/Runtime/type-code.h @@ -53,10 +53,19 @@ class TypeCode { RT_API_ATTRS std::optional> GetCategoryAndKind() const; - RT_API_ATTRS bool operator==(const TypeCode &that) const { - return raw_ == that.raw_; + RT_API_ATTRS bool operator==(TypeCode that) const { + if (raw_ == that.raw_) { // fast path + return true; + } else { + // Multiple raw CFI_type_... 
codes can represent the same Fortran + // type category + kind type parameter, e.g. CFI_type_int and + // CFI_type_int32_t. + auto thisCK{GetCategoryAndKind()}; + auto thatCK{that.GetCategoryAndKind()}; + return thisCK && thatCK && *thisCK == *thatCK; + } } - bool operator!=(const TypeCode &that) const { return raw_ != that.raw_; } + bool operator!=(TypeCode that) const { return !(*this == that); } private: ISO::CFI_type_t raw_{CFI_type_other}; diff --git a/flang/runtime/ISO_Fortran_binding.cpp b/flang/runtime/ISO_Fortran_binding.cpp index 8130875e47360..45b4d0ae3f569 100644 --- a/flang/runtime/ISO_Fortran_binding.cpp +++ b/flang/runtime/ISO_Fortran_binding.cpp @@ -148,9 +148,11 @@ int CFI_section(CFI_cdesc_t *result, const CFI_cdesc_t *source, if (IsAssumedSize(source) && !upper_bounds) { return CFI_INVALID_DESCRIPTOR; } - if ((result->type != source->type) || - (result->elem_len != source->elem_len)) { - return CFI_INVALID_DESCRIPTOR; + if (runtime::TypeCode{result->type} != runtime::TypeCode{source->type}) { + return CFI_INVALID_TYPE; + } + if (source->elem_len != result->elem_len) { + return CFI_INVALID_ELEM_LEN; } if (result->attribute == CFI_attribute_allocatable) { return CFI_INVALID_ATTRIBUTE; @@ -256,7 +258,7 @@ int CFI_setpointer(CFI_cdesc_t *result, const CFI_cdesc_t *source, if (source->rank != result->rank) { return CFI_INVALID_RANK; } - if (source->type != result->type) { + if (runtime::TypeCode{source->type} != runtime::TypeCode{result->type}) { return CFI_INVALID_TYPE; } if (source->elem_len != result->elem_len) { From 5a3130e3b645cf5fc179d9274eb1b62b7f0c7438 Mon Sep 17 00:00:00 2001 From: Jan Svoboda Date: Sat, 9 Sep 2023 19:11:36 -0700 Subject: [PATCH 06/22] [clang] NFCI: Use `FileEntryRef` in `FileManager::getCanonicalName()` --- clang/include/clang/Basic/FileManager.h | 2 +- clang/lib/Basic/FileManager.cpp | 4 ++-- clang/lib/ExtractAPI/ExtractAPIConsumer.cpp | 12 ++++++------ clang/lib/Frontend/DependencyFile.cpp | 2 +- clang/lib/Frontend/SARIFDiagnostic.cpp | 5 ++--- clang/lib/Frontend/TextDiagnostic.cpp | 4 ++-- 6 files changed, 14 insertions(+), 15 deletions(-) diff --git a/clang/include/clang/Basic/FileManager.h b/clang/include/clang/Basic/FileManager.h index 56c45e3cf68cd..115558bfeee4e 100644 --- a/clang/include/clang/Basic/FileManager.h +++ b/clang/include/clang/Basic/FileManager.h @@ -327,7 +327,7 @@ class FileManager : public RefCountedBase { /// This is a very expensive operation, despite its results being cached, /// and should only be used when the physical layout of the file system is /// required, which is (almost) never. - StringRef getCanonicalName(const FileEntry *File); + StringRef getCanonicalName(FileEntryRef File); private: /// Retrieve the canonical name for a given file or directory. 
diff --git a/clang/lib/Basic/FileManager.cpp b/clang/lib/Basic/FileManager.cpp index 30e2916c7f2a7..c80fbfd7433f5 100644 --- a/clang/lib/Basic/FileManager.cpp +++ b/clang/lib/Basic/FileManager.cpp @@ -636,8 +636,8 @@ StringRef FileManager::getCanonicalName(DirectoryEntryRef Dir) { return getCanonicalName(Dir, Dir.getName()); } -StringRef FileManager::getCanonicalName(const FileEntry *File) { - return getCanonicalName(File, File->getName()); +StringRef FileManager::getCanonicalName(FileEntryRef File) { + return getCanonicalName(File, File.getName()); } StringRef FileManager::getCanonicalName(const void *Entry, StringRef Name) { diff --git a/clang/lib/ExtractAPI/ExtractAPIConsumer.cpp b/clang/lib/ExtractAPI/ExtractAPIConsumer.cpp index 50c223680b392..3aba3bf44547c 100644 --- a/clang/lib/ExtractAPI/ExtractAPIConsumer.cpp +++ b/clang/lib/ExtractAPI/ExtractAPIConsumer.cpp @@ -177,17 +177,17 @@ struct LocationFileChecker { if (FID.isInvalid()) return false; - const auto *File = SM.getFileEntryForID(FID); + OptionalFileEntryRef File = SM.getFileEntryRefForID(FID); if (!File) return false; - if (KnownFileEntries.count(File)) + if (KnownFileEntries.count(*File)) return true; - if (ExternalFileEntries.count(File)) + if (ExternalFileEntries.count(*File)) return false; - StringRef FileName = SM.getFileManager().getCanonicalName(File); + StringRef FileName = SM.getFileManager().getCanonicalName(*File); // Try to reduce the include name the same way we tried to include it. bool IsQuoted = false; @@ -197,13 +197,13 @@ struct LocationFileChecker { return KnownFile.first.equals(*IncludeName) && KnownFile.second == IsQuoted; })) { - KnownFileEntries.insert(File); + KnownFileEntries.insert(*File); return true; } // Record that the file was not found to avoid future reverse lookup for // the same file. - ExternalFileEntries.insert(File); + ExternalFileEntries.insert(*File); return false; } diff --git a/clang/lib/Frontend/DependencyFile.cpp b/clang/lib/Frontend/DependencyFile.cpp index 1140c09dbb985..c2f6f41ae291e 100644 --- a/clang/lib/Frontend/DependencyFile.cpp +++ b/clang/lib/Frontend/DependencyFile.cpp @@ -159,7 +159,7 @@ void DependencyCollector::maybeAddDependency(StringRef Filename, bool IsMissing) { if (sawDependency(Filename, FromModule, IsSystem, IsModuleFile, IsMissing)) { if (IsSystem && FileMgr && shouldCanonicalizeSystemDependencies()) { - if (auto F = FileMgr->getFile(Filename)) + if (auto F = FileMgr->getOptionalFileRef(Filename)) Filename = FileMgr->getCanonicalName(*F); } addDependency(Filename); diff --git a/clang/lib/Frontend/SARIFDiagnostic.cpp b/clang/lib/Frontend/SARIFDiagnostic.cpp index ee8e8b54e512c..4e36153ed5391 100644 --- a/clang/lib/Frontend/SARIFDiagnostic.cpp +++ b/clang/lib/Frontend/SARIFDiagnostic.cpp @@ -164,8 +164,7 @@ SARIFDiagnostic::addDiagnosticLevelToRule(SarifRule Rule, llvm::StringRef SARIFDiagnostic::emitFilename(StringRef Filename, const SourceManager &SM) { if (DiagOpts->AbsolutePath) { - llvm::ErrorOr File = - SM.getFileManager().getFile(Filename); + auto File = SM.getFileManager().getOptionalFileRef(Filename); if (File) { // We want to print a simplified absolute path, i. e. without "dots". // @@ -182,7 +181,7 @@ llvm::StringRef SARIFDiagnostic::emitFilename(StringRef Filename, // on Windows we can just use llvm::sys::path::remove_dots(), because, // on that system, both aforementioned paths point to the same place. 
#ifdef _WIN32 - SmallString<256> TmpFilename = (*File)->getName(); + SmallString<256> TmpFilename = File->getName(); llvm::sys::fs::make_absolute(TmpFilename); llvm::sys::path::native(TmpFilename); llvm::sys::path::remove_dots(TmpFilename, /* remove_dot_dot */ true); diff --git a/clang/lib/Frontend/TextDiagnostic.cpp b/clang/lib/Frontend/TextDiagnostic.cpp index 7f558541a2871..eaa6e8d29a1de 100644 --- a/clang/lib/Frontend/TextDiagnostic.cpp +++ b/clang/lib/Frontend/TextDiagnostic.cpp @@ -736,7 +736,7 @@ void TextDiagnostic::emitFilename(StringRef Filename, const SourceManager &SM) { SmallString<4096> TmpFilename; #endif if (DiagOpts->AbsolutePath) { - auto File = SM.getFileManager().getFile(Filename); + auto File = SM.getFileManager().getOptionalFileRef(Filename); if (File) { // We want to print a simplified absolute path, i. e. without "dots". // @@ -753,7 +753,7 @@ void TextDiagnostic::emitFilename(StringRef Filename, const SourceManager &SM) { // on Windows we can just use llvm::sys::path::remove_dots(), because, // on that system, both aforementioned paths point to the same place. #ifdef _WIN32 - TmpFilename = (*File)->getName(); + TmpFilename = File->getName(); llvm::sys::fs::make_absolute(TmpFilename); llvm::sys::path::native(TmpFilename); llvm::sys::path::remove_dots(TmpFilename, /* remove_dot_dot */ true); From 8ddedbf2f8788903ec1a048830d3959138583e6b Mon Sep 17 00:00:00 2001 From: Peter Klausler <35819229+klausler@users.noreply.github.com> Date: Wed, 13 Sep 2023 15:47:31 -0700 Subject: [PATCH 07/22] =?UTF-8?q?[flang]=20Assume=20unknown=20target=20of?= =?UTF-8?q?=20procedure=20pointer=20assignment=20is=20a=20pr=E2=80=A6=20(#?= =?UTF-8?q?66232)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit …ocedure When an previously unknown name appears as the target of an assignment to a known procedure pointer, create an external symbol for it rather than an implicitly-typed object symbol. --- flang/lib/Semantics/resolve-names.cpp | 9 +++++++++ flang/test/Semantics/symbol31.f90 | 7 +++++++ 2 files changed, 16 insertions(+) create mode 100644 flang/test/Semantics/symbol31.f90 diff --git a/flang/lib/Semantics/resolve-names.cpp b/flang/lib/Semantics/resolve-names.cpp index 865c198424696..29cd107186fb5 100644 --- a/flang/lib/Semantics/resolve-names.cpp +++ b/flang/lib/Semantics/resolve-names.cpp @@ -8108,6 +8108,15 @@ bool ResolveNamesVisitor::Pre(const parser::PointerAssignmentStmt &x) { return false; } } + if (IsProcedurePointer(parser::GetLastName(dataRef).symbol) && + !FindSymbol(*name)) { + // Unknown target of procedure pointer must be an external procedure + Symbol &symbol{MakeSymbol( + context().globalScope(), name->source, Attrs{Attr::EXTERNAL})}; + Resolve(*name, symbol); + ConvertToProcEntity(symbol); + return false; + } } Walk(expr); return false; diff --git a/flang/test/Semantics/symbol31.f90 b/flang/test/Semantics/symbol31.f90 new file mode 100644 index 0000000000000..c8016cf9ec71b --- /dev/null +++ b/flang/test/Semantics/symbol31.f90 @@ -0,0 +1,7 @@ +! 
RUN: %python %S/test_symbols.py %s %flang_fc1 + !DEF: /MainProgram1/pptr EXTERNAL, POINTER ProcEntity + procedure(), pointer :: pptr + !REF: /MainProgram1/pptr + !DEF: /mustbeexternal EXTERNAL ProcEntity + pptr => mustbeexternal +end program From 4a831250b871d5fbd5c6923fec4a492ec35f4b12 Mon Sep 17 00:00:00 2001 From: Daniil Dudkin Date: Wed, 13 Sep 2023 22:17:52 +0000 Subject: [PATCH 08/22] =?UTF-8?q?[mlir][vector]=20Rename=20vector=20reduct?= =?UTF-8?q?ions:=20`maxf`=20=E2=86=92=20`maximumf`,=20`minf`=20=E2=86=92?= =?UTF-8?q?=20`minimumf`?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch is part of a larger initiative aimed at fixing floating-point `max` and `min` operations in MLIR: https://discourse.llvm.org/t/rfc-fix-floating-point-max-and-min-operations-in-mlir/72671. Here, we are addressing task 2.1 from the plan, which involves renaming the vector reductions to align with the semantics of the corresponding LLVM intrinsics. Reviewed By: dcaballe Differential Revision: https://reviews.llvm.org/D158618 --- .../mlir/Dialect/Vector/IR/VectorOps.td | 5 +++- .../VectorToLLVM/ConvertVectorToLLVM.cpp | 8 +++++-- .../VectorToSPIRV/VectorToSPIRV.cpp | 3 ++- .../Linalg/Transforms/Vectorization.cpp | 6 +++-- mlir/lib/Dialect/Vector/IR/VectorOps.cpp | 8 +++++-- .../Vector/Transforms/LowerVectorContract.cpp | 3 ++- .../Vector/Transforms/LowerVectorScan.cpp | 4 ++++ .../VectorToLLVM/vector-to-llvm.mlir | 12 +++++----- .../VectorToSPIRV/vector-to-spirv.mlir | 24 +++++++++---------- .../SuperVectorize/vectorize_reduction.mlir | 4 ++-- mlir/test/Dialect/Linalg/vectorization.mlir | 4 ++-- mlir/test/Dialect/Vector/canonicalize.mlir | 6 ++--- mlir/test/Dialect/Vector/ops.mlir | 8 +++++-- .../CPU/test-reductions-f32-reassoc.mlir | 4 ++-- .../Vector/CPU/test-reductions-f32.mlir | 4 ++-- .../CPU/test-reductions-f64-reassoc.mlir | 4 ++-- .../Vector/CPU/test-reductions-f64.mlir | 4 ++-- 17 files changed, 67 insertions(+), 44 deletions(-) diff --git a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td index bf42b4053ac05..28b5864914f69 100644 --- a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td +++ b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td @@ -48,6 +48,8 @@ def COMBINING_KIND_MAXF : I32BitEnumAttrCaseBit<"MAXF", 7, "maxf">; def COMBINING_KIND_AND : I32BitEnumAttrCaseBit<"AND", 8, "and">; def COMBINING_KIND_OR : I32BitEnumAttrCaseBit<"OR", 9, "or">; def COMBINING_KIND_XOR : I32BitEnumAttrCaseBit<"XOR", 10, "xor">; +def COMBINING_KIND_MINIMUMF : I32BitEnumAttrCaseBit<"MINIMUMF", 11, "minimumf">; +def COMBINING_KIND_MAXIMUMF : I32BitEnumAttrCaseBit<"MAXIMUMF", 12, "maximumf">; def CombiningKind : I32BitEnumAttr< "CombiningKind", @@ -55,7 +57,8 @@ def CombiningKind : I32BitEnumAttr< [COMBINING_KIND_ADD, COMBINING_KIND_MUL, COMBINING_KIND_MINUI, COMBINING_KIND_MINSI, COMBINING_KIND_MINF, COMBINING_KIND_MAXUI, COMBINING_KIND_MAXSI, COMBINING_KIND_MAXF, COMBINING_KIND_AND, - COMBINING_KIND_OR, COMBINING_KIND_XOR]> { + COMBINING_KIND_OR, COMBINING_KIND_XOR, + COMBINING_KIND_MAXIMUMF, COMBINING_KIND_MINIMUMF]> { let cppNamespace = "::mlir::vector"; let genSpecializedAttr = 0; } diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp index 92f7aa6976039..5a42cccca2975 100644 --- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp +++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp @@ -762,11 +762,11 @@ class 
VectorReductionOpConversion result = lowerReductionWithStartValue( rewriter, loc, llvmType, operand, acc, reassociateFPReductions); - } else if (kind == vector::CombiningKind::MINF) { + } else if (kind == vector::CombiningKind::MINIMUMF) { result = createFPReductionComparisonOpLowering( rewriter, loc, llvmType, operand, acc); - } else if (kind == vector::CombiningKind::MAXF) { + } else if (kind == vector::CombiningKind::MAXIMUMF) { result = createFPReductionComparisonOpLowering( rewriter, loc, llvmType, operand, acc); @@ -893,6 +893,10 @@ class MaskedReductionOpConversion ReductionNeutralFPMin>( rewriter, loc, llvmType, operand, acc, maskOp.getMask()); break; + default: + return rewriter.notifyMatchFailure( + maskOp, + "lowering to LLVM is not implemented for this masked operation"); } // Replace `vector.mask` operation altogether. diff --git a/mlir/lib/Conversion/VectorToSPIRV/VectorToSPIRV.cpp b/mlir/lib/Conversion/VectorToSPIRV/VectorToSPIRV.cpp index 57191ce8dd4c9..a8c68abc8bcbf 100644 --- a/mlir/lib/Conversion/VectorToSPIRV/VectorToSPIRV.cpp +++ b/mlir/lib/Conversion/VectorToSPIRV/VectorToSPIRV.cpp @@ -391,7 +391,8 @@ struct VectorReductionPattern final INT_AND_FLOAT_CASE(ADD, IAddOp, FAddOp); INT_AND_FLOAT_CASE(MUL, IMulOp, FMulOp); - + INT_OR_FLOAT_CASE(MAXIMUMF, SPIRVFMaxOp); + INT_OR_FLOAT_CASE(MINIMUMF, SPIRVFMinOp); INT_OR_FLOAT_CASE(MAXF, SPIRVFMaxOp); INT_OR_FLOAT_CASE(MINF, SPIRVFMinOp); INT_OR_FLOAT_CASE(MINUI, SPIRVUMinOp); diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp index cf1278a8c6806..f873bd0e0b68e 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp @@ -505,10 +505,10 @@ mlir::linalg::getCombinerOpKind(Operation *combinerOp) { .Case([&](auto op) { return CombiningKind::AND; }) .Case([&](auto op) { return CombiningKind::MAXSI; }) .Case([&](auto op) { return CombiningKind::MAXUI; }) - .Case([&](auto op) { return CombiningKind::MAXF; }) + .Case([&](auto op) { return CombiningKind::MAXIMUMF; }) .Case([&](auto op) { return CombiningKind::MINSI; }) .Case([&](auto op) { return CombiningKind::MINUI; }) - .Case([&](auto op) { return CombiningKind::MINF; }) + .Case([&](auto op) { return CombiningKind::MINIMUMF; }) .Case( [&](auto op) { return CombiningKind::MUL; }) .Case([&](auto op) { return CombiningKind::OR; }) @@ -2416,9 +2416,11 @@ bool isSupportedPoolKind(vector::CombiningKind kind) { switch (kind) { case vector::CombiningKind::ADD: case vector::CombiningKind::MAXF: + case vector::CombiningKind::MAXIMUMF: case vector::CombiningKind::MAXSI: case vector::CombiningKind::MAXUI: case vector::CombiningKind::MINF: + case vector::CombiningKind::MINIMUMF: case vector::CombiningKind::MINSI: case vector::CombiningKind::MINUI: return true; diff --git a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp index 11aa76798bcaa..a8ad05f7bc1ca 100644 --- a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp +++ b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp @@ -125,6 +125,8 @@ static bool isSupportedCombiningKind(CombiningKind combiningKind, return elementType.isIntOrIndex(); case CombiningKind::MINF: case CombiningKind::MAXF: + case CombiningKind::MINIMUMF: + case CombiningKind::MAXIMUMF: return llvm::isa(elementType); } return false; @@ -495,7 +497,7 @@ Value mlir::vector::getVectorReductionOp(arith::AtomicRMWKind op, CombiningKind::MUL, vector); case arith::AtomicRMWKind::minimumf: return builder.create(vector.getLoc(), - 
CombiningKind::MINF, vector); + CombiningKind::MINIMUMF, vector); case arith::AtomicRMWKind::mins: return builder.create(vector.getLoc(), CombiningKind::MINSI, vector); @@ -504,7 +506,7 @@ Value mlir::vector::getVectorReductionOp(arith::AtomicRMWKind op, CombiningKind::MINUI, vector); case arith::AtomicRMWKind::maximumf: return builder.create(vector.getLoc(), - CombiningKind::MAXF, vector); + CombiningKind::MAXIMUMF, vector); case arith::AtomicRMWKind::maxs: return builder.create(vector.getLoc(), CombiningKind::MAXSI, vector); @@ -5947,11 +5949,13 @@ Value mlir::vector::makeArithReduction(OpBuilder &b, Location loc, result = b.createOrFold(loc, v1, acc); break; case CombiningKind::MAXF: + case CombiningKind::MAXIMUMF: assert(llvm::isa(t1) && llvm::isa(tAcc) && "expected float values"); result = b.createOrFold(loc, v1, acc); break; case CombiningKind::MINF: + case CombiningKind::MINIMUMF: assert(llvm::isa(t1) && llvm::isa(tAcc) && "expected float values"); result = b.createOrFold(loc, v1, acc); diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorContract.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorContract.cpp index b66077372164e..3ab3c6ad8a3e2 100644 --- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorContract.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorContract.cpp @@ -140,7 +140,8 @@ createContractArithOp(Location loc, Value x, Value y, Value acc, Value mul; if (isInt) { - if (kind == CombiningKind::MINF || kind == CombiningKind::MAXF) + if (kind == CombiningKind::MINF || kind == CombiningKind::MAXF || + kind == CombiningKind::MINIMUMF || kind == CombiningKind::MAXIMUMF) // Only valid for floating point types. return std::nullopt; mul = rewriter.create(loc, x, y); diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorScan.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorScan.cpp index 93c056be972ca..ef6e6f5264a22 100644 --- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorScan.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorScan.cpp @@ -87,9 +87,11 @@ static Value genOperator(Location loc, Value x, Value y, combinedResult = rewriter.create(loc, x, y); break; case CombiningKind::MINF: + case CombiningKind::MINIMUMF: combinedResult = rewriter.create(loc, x, y); break; case CombiningKind::MAXF: + case CombiningKind::MAXIMUMF: combinedResult = rewriter.create(loc, x, y); break; } @@ -104,7 +106,9 @@ static bool isValidKind(bool isInt, vector::CombiningKind kind) { KindType type{KindType::INVALID}; switch (kind) { case CombiningKind::MINF: + case CombiningKind::MINIMUMF: case CombiningKind::MAXF: + case CombiningKind::MAXIMUMF: type = KindType::FLOAT; break; case CombiningKind::MINUI: diff --git a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir index 44d6b637ede0a..354c79c9e198a 100644 --- a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir +++ b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir @@ -1317,11 +1317,11 @@ func.func @reduce_mul_acc_i32(%arg0: vector<16xi32>, %arg1 : i32) -> i32 { // ----- -func.func @reduce_fmax_f32(%arg0: vector<16xf32>, %arg1: f32) -> f32 { - %0 = vector.reduction , %arg0, %arg1 : vector<16xf32> into f32 +func.func @reduce_fmaximum_f32(%arg0: vector<16xf32>, %arg1: f32) -> f32 { + %0 = vector.reduction , %arg0, %arg1 : vector<16xf32> into f32 return %0 : f32 } -// CHECK-LABEL: @reduce_fmax_f32( +// CHECK-LABEL: @reduce_fmaximum_f32( // CHECK-SAME: %[[A:.*]]: vector<16xf32>, %[[B:.*]]: f32) // CHECK: %[[V:.*]] = llvm.intr.vector.reduce.fmaximum(%[[A]]) 
: (vector<16xf32>) -> f32 // CHECK: %[[R:.*]] = llvm.intr.maximum(%[[V]], %[[B]]) : (f32, f32) -> f32 @@ -1329,11 +1329,11 @@ func.func @reduce_fmax_f32(%arg0: vector<16xf32>, %arg1: f32) -> f32 { // ----- -func.func @reduce_fmin_f32(%arg0: vector<16xf32>, %arg1: f32) -> f32 { - %0 = vector.reduction , %arg0, %arg1 : vector<16xf32> into f32 +func.func @reduce_fminimum_f32(%arg0: vector<16xf32>, %arg1: f32) -> f32 { + %0 = vector.reduction , %arg0, %arg1 : vector<16xf32> into f32 return %0 : f32 } -// CHECK-LABEL: @reduce_fmin_f32( +// CHECK-LABEL: @reduce_fminimum_f32( // CHECK-SAME: %[[A:.*]]: vector<16xf32>, %[[B:.*]]: f32) // CHECK: %[[V:.*]] = llvm.intr.vector.reduce.fminimum(%[[A]]) : (vector<16xf32>) -> f32 // CHECK: %[[R:.*]] = llvm.intr.minimum(%[[V]], %[[B]]) : (f32, f32) -> f32 diff --git a/mlir/test/Conversion/VectorToSPIRV/vector-to-spirv.mlir b/mlir/test/Conversion/VectorToSPIRV/vector-to-spirv.mlir index 310df8030db30..f60a522cbfdba 100644 --- a/mlir/test/Conversion/VectorToSPIRV/vector-to-spirv.mlir +++ b/mlir/test/Conversion/VectorToSPIRV/vector-to-spirv.mlir @@ -50,7 +50,7 @@ func.func @cl_fma_size1_vector(%a: vector<1xf32>, %b: vector<1xf32>, %c: vector< return %0 : vector<1xf32> } -// CHECK-LABEL: func @cl_reduction_maxf +// CHECK-LABEL: func @cl_reduction_maximumf // CHECK-SAME: (%[[V:.+]]: vector<3xf32>, %[[S:.+]]: f32) // CHECK: %[[S0:.+]] = spirv.CompositeExtract %[[V]][0 : i32] : vector<3xf32> // CHECK: %[[S1:.+]] = spirv.CompositeExtract %[[V]][1 : i32] : vector<3xf32> @@ -59,12 +59,12 @@ func.func @cl_fma_size1_vector(%a: vector<1xf32>, %b: vector<1xf32>, %c: vector< // CHECK: %[[MAX1:.+]] = spirv.CL.fmax %[[MAX0]], %[[S2]] // CHECK: %[[MAX2:.+]] = spirv.CL.fmax %[[MAX1]], %[[S]] // CHECK: return %[[MAX2]] -func.func @cl_reduction_maxf(%v : vector<3xf32>, %s: f32) -> f32 { - %reduce = vector.reduction , %v, %s : vector<3xf32> into f32 +func.func @cl_reduction_maximumf(%v : vector<3xf32>, %s: f32) -> f32 { + %reduce = vector.reduction , %v, %s : vector<3xf32> into f32 return %reduce : f32 } -// CHECK-LABEL: func @cl_reduction_minf +// CHECK-LABEL: func @cl_reduction_minimumf // CHECK-SAME: (%[[V:.+]]: vector<3xf32>, %[[S:.+]]: f32) // CHECK: %[[S0:.+]] = spirv.CompositeExtract %[[V]][0 : i32] : vector<3xf32> // CHECK: %[[S1:.+]] = spirv.CompositeExtract %[[V]][1 : i32] : vector<3xf32> @@ -73,8 +73,8 @@ func.func @cl_reduction_maxf(%v : vector<3xf32>, %s: f32) -> f32 { // CHECK: %[[MIN1:.+]] = spirv.CL.fmin %[[MIN0]], %[[S2]] // CHECK: %[[MIN2:.+]] = spirv.CL.fmin %[[MIN1]], %[[S]] // CHECK: return %[[MIN2]] -func.func @cl_reduction_minf(%v : vector<3xf32>, %s: f32) -> f32 { - %reduce = vector.reduction , %v, %s : vector<3xf32> into f32 +func.func @cl_reduction_minimumf(%v : vector<3xf32>, %s: f32) -> f32 { + %reduce = vector.reduction , %v, %s : vector<3xf32> into f32 return %reduce : f32 } @@ -516,7 +516,7 @@ func.func @reduction_mul(%v : vector<3xf32>, %s: f32) -> f32 { // ----- -// CHECK-LABEL: func @reduction_maxf +// CHECK-LABEL: func @reduction_maximumf // CHECK-SAME: (%[[V:.+]]: vector<3xf32>, %[[S:.+]]: f32) // CHECK: %[[S0:.+]] = spirv.CompositeExtract %[[V]][0 : i32] : vector<3xf32> // CHECK: %[[S1:.+]] = spirv.CompositeExtract %[[V]][1 : i32] : vector<3xf32> @@ -525,14 +525,14 @@ func.func @reduction_mul(%v : vector<3xf32>, %s: f32) -> f32 { // CHECK: %[[MAX1:.+]] = spirv.GL.FMax %[[MAX0]], %[[S2]] // CHECK: %[[MAX2:.+]] = spirv.GL.FMax %[[MAX1]], %[[S]] // CHECK: return %[[MAX2]] -func.func @reduction_maxf(%v : vector<3xf32>, %s: f32) -> f32 { - %reduce 
= vector.reduction , %v, %s : vector<3xf32> into f32 +func.func @reduction_maximumf(%v : vector<3xf32>, %s: f32) -> f32 { + %reduce = vector.reduction , %v, %s : vector<3xf32> into f32 return %reduce : f32 } // ----- -// CHECK-LABEL: func @reduction_minf +// CHECK-LABEL: func @reduction_minimumf // CHECK-SAME: (%[[V:.+]]: vector<3xf32>, %[[S:.+]]: f32) // CHECK: %[[S0:.+]] = spirv.CompositeExtract %[[V]][0 : i32] : vector<3xf32> // CHECK: %[[S1:.+]] = spirv.CompositeExtract %[[V]][1 : i32] : vector<3xf32> @@ -541,8 +541,8 @@ func.func @reduction_maxf(%v : vector<3xf32>, %s: f32) -> f32 { // CHECK: %[[MIN1:.+]] = spirv.GL.FMin %[[MIN0]], %[[S2]] // CHECK: %[[MIN2:.+]] = spirv.GL.FMin %[[MIN1]], %[[S]] // CHECK: return %[[MIN2]] -func.func @reduction_minf(%v : vector<3xf32>, %s: f32) -> f32 { - %reduce = vector.reduction , %v, %s : vector<3xf32> into f32 +func.func @reduction_minimumf(%v : vector<3xf32>, %s: f32) -> f32 { + %reduce = vector.reduction , %v, %s : vector<3xf32> into f32 return %reduce : f32 } diff --git a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_reduction.mlir b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_reduction.mlir index e6ce0446924d5..29c42fcd50bd7 100644 --- a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_reduction.mlir +++ b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_reduction.mlir @@ -50,7 +50,7 @@ func.func @vecdim_reduction_minf(%in: memref<256x512xf32>, %out: memref<256xf32> // CHECK: %[[min:.*]] = arith.minimumf %[[red_iter]], %[[ld]] : vector<128xf32> // CHECK: affine.yield %[[min]] : vector<128xf32> // CHECK: } -// CHECK: %[[final_min:.*]] = vector.reduction , %[[vred:.*]] : vector<128xf32> into f32 +// CHECK: %[[final_min:.*]] = vector.reduction , %[[vred:.*]] : vector<128xf32> into f32 // CHECK: affine.store %[[final_min]], %{{.*}} : memref<256xf32> // CHECK: } @@ -77,7 +77,7 @@ func.func @vecdim_reduction_maxf(%in: memref<256x512xf32>, %out: memref<256xf32> // CHECK: %[[max:.*]] = arith.maximumf %[[red_iter]], %[[ld]] : vector<128xf32> // CHECK: affine.yield %[[max]] : vector<128xf32> // CHECK: } -// CHECK: %[[final_max:.*]] = vector.reduction , %[[vred:.*]] : vector<128xf32> into f32 +// CHECK: %[[final_max:.*]] = vector.reduction , %[[vred:.*]] : vector<128xf32> into f32 // CHECK: affine.store %[[final_max]], %{{.*}} : memref<256xf32> // CHECK: } diff --git a/mlir/test/Dialect/Linalg/vectorization.mlir b/mlir/test/Dialect/Linalg/vectorization.mlir index da9ef1f70de4b..a5ec058b6e02c 100644 --- a/mlir/test/Dialect/Linalg/vectorization.mlir +++ b/mlir/test/Dialect/Linalg/vectorization.mlir @@ -1172,7 +1172,7 @@ transform.sequence failures(propagate) { func.func @red_max_2d(%arg0: tensor<4x4xf32>) -> tensor<4xf32> { // CHECK: %[[CMINF:.+]] = arith.constant dense<-3.402820e+38> : vector<4xf32> // CHECK: tensor.empty() : tensor<4xf32> - // CHECK: vector.multi_reduction , {{.*}}, %[[CMINF]] [1] : vector<4x4xf32> to vector<4xf32> + // CHECK: vector.multi_reduction , {{.*}}, %[[CMINF]] [1] : vector<4x4xf32> to vector<4xf32> // CHECK: vector.transfer_write {{.*}} : vector<4xf32>, tensor<4xf32> %ident = arith.constant -3.40282e+38 : f32 %init = tensor.empty() : tensor<4xf32> @@ -1203,7 +1203,7 @@ func.func @red_min_2d(%arg0: tensor<4x4xf32>) -> tensor<4xf32> { // CHECK: %[[CMAXF:.+]] = arith.constant dense<3.402820e+38> : vector<4xf32> // CHECK: tensor.empty() : tensor<4xf32> // CHECK: vector.transfer_read {{.*}} : tensor<4x4xf32>, vector<4x4xf32> - // CHECK: vector.multi_reduction , {{.*}}, %[[CMAXF]] [1] : vector<4x4xf32> to vector<4xf32> + 
// CHECK: vector.multi_reduction , {{.*}}, %[[CMAXF]] [1] : vector<4x4xf32> to vector<4xf32> // CHECK: vector.transfer_write {{.*}} : vector<4xf32>, tensor<4xf32> %maxf32 = arith.constant 3.40282e+38 : f32 %init = tensor.empty() : tensor<4xf32> diff --git a/mlir/test/Dialect/Vector/canonicalize.mlir b/mlir/test/Dialect/Vector/canonicalize.mlir index c22e68b986961..98b8ce41e15f6 100644 --- a/mlir/test/Dialect/Vector/canonicalize.mlir +++ b/mlir/test/Dialect/Vector/canonicalize.mlir @@ -1992,13 +1992,13 @@ func.func @dont_reduce_one_element_vector(%a : vector<4xf32>) -> f32 { // ----- -// CHECK-LABEL: func @reduce_one_element_vector_maxf +// CHECK-LABEL: func @reduce_one_element_vector_maximumf // CHECK-SAME: (%[[V:.+]]: vector<1xf32>, %[[B:.+]]: f32) // CHECK: %[[A:.+]] = vector.extract %[[V]][0] : vector<1xf32> // CHECK: %[[S:.+]] = arith.maximumf %[[A]], %[[B]] : f32 // CHECK: return %[[S]] -func.func @reduce_one_element_vector_maxf(%a : vector<1xf32>, %b: f32) -> f32 { - %s = vector.reduction , %a, %b : vector<1xf32> into f32 +func.func @reduce_one_element_vector_maximumf(%a : vector<1xf32>, %b: f32) -> f32 { + %s = vector.reduction , %a, %b : vector<1xf32> into f32 return %s : f32 } diff --git a/mlir/test/Dialect/Vector/ops.mlir b/mlir/test/Dialect/Vector/ops.mlir index d41cee5ea67b0..4ea4379372e83 100644 --- a/mlir/test/Dialect/Vector/ops.mlir +++ b/mlir/test/Dialect/Vector/ops.mlir @@ -576,9 +576,13 @@ func.func @reduce_fp(%arg0: vector<16xf32>, %arg1: f32) -> f32 { vector.reduction , %arg0, %arg1 : vector<16xf32> into f32 // CHECK: vector.reduction , %{{.*}} : vector<16xf32> into f32 vector.reduction , %arg0 : vector<16xf32> into f32 - // CHECK: %[[X:.*]] = vector.reduction , %{{.*}} : vector<16xf32> into f32 + // CHECK: %[[X0:.*]] = vector.reduction , %{{.*}} : vector<16xf32> into f32 %0 = vector.reduction , %arg0 : vector<16xf32> into f32 - // CHECK: return %[[X]] : f32 + // CHECK: vector.reduction , %{{.*}} : vector<16xf32> into f32 + vector.reduction , %arg0 : vector<16xf32> into f32 + // CHECK: %[[X1:.*]] = vector.reduction , %{{.*}} : vector<16xf32> into f32 + %1 = vector.reduction , %arg0 : vector<16xf32> into f32 + // CHECK: return %[[X0]] : f32 return %0 : f32 } diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f32-reassoc.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f32-reassoc.mlir index d71a6a3de6492..1ebf0394d4b9f 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f32-reassoc.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f32-reassoc.mlir @@ -27,10 +27,10 @@ func.func @entry() { %1 = vector.reduction , %v2 : vector<64xf32> into f32 vector.print %1 : f32 // CHECK: 6 - %2 = vector.reduction , %v2 : vector<64xf32> into f32 + %2 = vector.reduction , %v2 : vector<64xf32> into f32 vector.print %2 : f32 // CHECK: 1 - %3 = vector.reduction , %v2 : vector<64xf32> into f32 + %3 = vector.reduction , %v2 : vector<64xf32> into f32 vector.print %3 : f32 // CHECK: 3 diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f32.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f32.mlir index 91b0c47782266..fdb83ba055ac9 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f32.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f32.mlir @@ -39,10 +39,10 @@ func.func @entry() { %1 = vector.reduction , %v9 : vector<10xf32> into f32 vector.print %1 : f32 // CHECK: -5760 - %2 = vector.reduction , %v9 : vector<10xf32> into f32 + %2 = vector.reduction , %v9 
: vector<10xf32> into f32 vector.print %2 : f32 // CHECK: -16 - %3 = vector.reduction , %v9 : vector<10xf32> into f32 + %3 = vector.reduction , %v9 : vector<10xf32> into f32 vector.print %3 : f32 // CHECK: 5 diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f64-reassoc.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f64-reassoc.mlir index 49a92ff41d7fa..8c250de5786fb 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f64-reassoc.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f64-reassoc.mlir @@ -27,10 +27,10 @@ func.func @entry() { %1 = vector.reduction , %v2 : vector<64xf64> into f64 vector.print %1 : f64 // CHECK: 6 - %2 = vector.reduction , %v2 : vector<64xf64> into f64 + %2 = vector.reduction , %v2 : vector<64xf64> into f64 vector.print %2 : f64 // CHECK: 1 - %3 = vector.reduction , %v2 : vector<64xf64> into f64 + %3 = vector.reduction , %v2 : vector<64xf64> into f64 vector.print %3 : f64 // CHECK: 3 diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f64.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f64.mlir index 1b063b7a6b460..5d3c2a5f02db7 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f64.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f64.mlir @@ -39,10 +39,10 @@ func.func @entry() { %1 = vector.reduction , %v9 : vector<10xf64> into f64 vector.print %1 : f64 // CHECK: -5760 - %2 = vector.reduction , %v9 : vector<10xf64> into f64 + %2 = vector.reduction , %v9 : vector<10xf64> into f64 vector.print %2 : f64 // CHECK: -16 - %3 = vector.reduction , %v9 : vector<10xf64> into f64 + %3 = vector.reduction , %v9 : vector<10xf64> into f64 vector.print %3 : f64 // CHECK: 5 From 709b27427b4661bdd08fe80b0164acf53c895793 Mon Sep 17 00:00:00 2001 From: Daniil Dudkin Date: Wed, 13 Sep 2023 22:18:17 +0000 Subject: [PATCH 09/22] [mlir][vector] Bring back `maxf`/`minf` reductions This patch is part of a larger initiative aimed at fixing floating-point `max` and `min` operations in MLIR: https://discourse.llvm.org/t/rfc-fix-floating-point-max-and-min-operations-in-mlir/72671. In line with the mentioned RFC, this patch tackles tasks 2.3 and 2.4. It adds LLVM conversions for the `maxf`/`minf` reductions to the non-NaN-propagating LLVM intrinsics. 
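
For illustration, here is a minimal sketch of the lowering this enables, matching the shape of the new test expectations added further down in this patch. The `<maxf>`/`<minf>` combining-kind spelling is an assumption (it is the pre-rename spelling this patch reinstates); the function name mirrors the added test.

```mlir
// Input: a float max reduction with an accumulator operand.
func.func @reduce_fmax_f32(%arg0: vector<16xf32>, %arg1: f32) -> f32 {
  %0 = vector.reduction <maxf>, %arg0, %arg1 : vector<16xf32> into f32
  return %0 : f32
}

// Expected conversion, using the non-NaN-propagating intrinsics
// (per the CHECK lines in the test below):
//   %v = llvm.intr.vector.reduce.fmax(%arg0) : (vector<16xf32>) -> f32
//   %r = llvm.intr.maxnum(%v, %arg1) : (f32, f32) -> f32
//   return %r : f32
```

The `minf` case is symmetric, lowering to `llvm.intr.vector.reduce.fmin` followed by `llvm.intr.minnum`.
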
Depends on D158618 Reviewed By: dcaballe Differential Revision: https://reviews.llvm.org/D158659 --- .../VectorToLLVM/ConvertVectorToLLVM.cpp | 18 ++++++++++---- .../VectorToLLVM/vector-to-llvm.mlir | 24 +++++++++++++++++++ .../CPU/test-reductions-f32-reassoc.mlir | 6 +++++ .../Vector/CPU/test-reductions-f32.mlir | 6 +++++ .../CPU/test-reductions-f64-reassoc.mlir | 6 +++++ .../Vector/CPU/test-reductions-f64.mlir | 6 +++++ 6 files changed, 62 insertions(+), 4 deletions(-) diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp index 5a42cccca2975..335e113d12b7e 100644 --- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp +++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp @@ -577,6 +577,14 @@ template <> struct VectorToScalarMapper { using Type = LLVM::MinimumOp; }; +template <> +struct VectorToScalarMapper { + using Type = LLVM::MaxNumOp; +}; +template <> +struct VectorToScalarMapper { + using Type = LLVM::MinNumOp; +}; } // namespace template @@ -770,6 +778,12 @@ class VectorReductionOpConversion result = createFPReductionComparisonOpLowering( rewriter, loc, llvmType, operand, acc); + } else if (kind == vector::CombiningKind::MINF) { + result = createFPReductionComparisonOpLowering( + rewriter, loc, llvmType, operand, acc); + } else if (kind == vector::CombiningKind::MAXF) { + result = createFPReductionComparisonOpLowering( + rewriter, loc, llvmType, operand, acc); } else return failure(); @@ -880,15 +894,11 @@ class MaskedReductionOpConversion rewriter, loc, llvmType, operand, acc, maskOp.getMask()); break; case vector::CombiningKind::MINF: - // FIXME: MLIR's 'minf' and LLVM's 'vector_reduce_fmin' do not handle - // NaNs/-0.0/+0.0 in the same way. result = lowerReductionWithStartValue( rewriter, loc, llvmType, operand, acc, maskOp.getMask()); break; case vector::CombiningKind::MAXF: - // FIXME: MLIR's 'minf' and LLVM's 'vector_reduce_fmin' do not handle - // NaNs/-0.0/+0.0 in the same way. 
result = lowerReductionWithStartValue( rewriter, loc, llvmType, operand, acc, maskOp.getMask()); diff --git a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir index 354c79c9e198a..4c06324087a01 100644 --- a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir +++ b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir @@ -1341,6 +1341,30 @@ func.func @reduce_fminimum_f32(%arg0: vector<16xf32>, %arg1: f32) -> f32 { // ----- +func.func @reduce_fmax_f32(%arg0: vector<16xf32>, %arg1: f32) -> f32 { + %0 = vector.reduction , %arg0, %arg1 : vector<16xf32> into f32 + return %0 : f32 +} +// CHECK-LABEL: @reduce_fmax_f32( +// CHECK-SAME: %[[A:.*]]: vector<16xf32>, %[[B:.*]]: f32) +// CHECK: %[[V:.*]] = llvm.intr.vector.reduce.fmax(%[[A]]) : (vector<16xf32>) -> f32 +// CHECK: %[[R:.*]] = llvm.intr.maxnum(%[[V]], %[[B]]) : (f32, f32) -> f32 +// CHECK: return %[[R]] : f32 + +// ----- + +func.func @reduce_fmin_f32(%arg0: vector<16xf32>, %arg1: f32) -> f32 { + %0 = vector.reduction , %arg0, %arg1 : vector<16xf32> into f32 + return %0 : f32 +} +// CHECK-LABEL: @reduce_fmin_f32( +// CHECK-SAME: %[[A:.*]]: vector<16xf32>, %[[B:.*]]: f32) +// CHECK: %[[V:.*]] = llvm.intr.vector.reduce.fmin(%[[A]]) : (vector<16xf32>) -> f32 +// CHECK: %[[R:.*]] = llvm.intr.minnum(%[[V]], %[[B]]) : (f32, f32) -> f32 +// CHECK: return %[[R]] : f32 + +// ----- + func.func @reduce_minui_i32(%arg0: vector<16xi32>) -> i32 { %0 = vector.reduction , %arg0 : vector<16xi32> into i32 return %0 : i32 diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f32-reassoc.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f32-reassoc.mlir index 1ebf0394d4b9f..ce160880a0093 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f32-reassoc.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f32-reassoc.mlir @@ -33,6 +33,12 @@ func.func @entry() { %3 = vector.reduction , %v2 : vector<64xf32> into f32 vector.print %3 : f32 // CHECK: 3 + %4 = vector.reduction , %v2 : vector<64xf32> into f32 + vector.print %4 : f32 + // CHECK: 1 + %5 = vector.reduction , %v2 : vector<64xf32> into f32 + vector.print %5 : f32 + // CHECK: 3 return } diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f32.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f32.mlir index fdb83ba055ac9..56d987ba2e225 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f32.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f32.mlir @@ -45,6 +45,12 @@ func.func @entry() { %3 = vector.reduction , %v9 : vector<10xf32> into f32 vector.print %3 : f32 // CHECK: 5 + %4 = vector.reduction , %v9 : vector<10xf32> into f32 + vector.print %4 : f32 + // CHECK: -16 + %5 = vector.reduction , %v9 : vector<10xf32> into f32 + vector.print %5 : f32 + // CHECK: 5 return } diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f64-reassoc.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f64-reassoc.mlir index 8c250de5786fb..711144b674851 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f64-reassoc.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f64-reassoc.mlir @@ -33,6 +33,12 @@ func.func @entry() { %3 = vector.reduction , %v2 : vector<64xf64> into f64 vector.print %3 : f64 // CHECK: 3 + %4 = vector.reduction , %v2 : vector<64xf64> into f64 + vector.print %4 : f64 + // CHECK: 1 + %5 = vector.reduction , %v2 : vector<64xf64> into f64 + vector.print %5 : 
f64 + // CHECK: 3 return } diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f64.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f64.mlir index 5d3c2a5f02db7..41d1bbcb731fe 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f64.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f64.mlir @@ -45,6 +45,12 @@ func.func @entry() { %3 = vector.reduction , %v9 : vector<10xf64> into f64 vector.print %3 : f64 // CHECK: 5 + %4 = vector.reduction , %v9 : vector<10xf64> into f64 + vector.print %4 : f64 + // CHECK: -16 + %5 = vector.reduction , %v9 : vector<10xf64> into f64 + vector.print %5 : f64 + // CHECK: 5 return } From 8f5d519458aaf8ca7731ee974b912f6897078282 Mon Sep 17 00:00:00 2001 From: Daniil Dudkin Date: Wed, 13 Sep 2023 22:18:36 +0000 Subject: [PATCH 10/22] [mlir][vector] Implement Workaround Lowerings for Masked `fm**imum` Reductions This patch is part of a larger initiative aimed at fixing floating-point `max` and `min` operations in MLIR: https://discourse.llvm.org/t/rfc-fix-floating-point-max-and-min-operations-in-mlir/72671. Within LLVM, there are no masked reduction counterparts for vector reductions such as `fmaximum` and `fminimum`. More information can be found here: https://github.com/llvm/llvm-project/issues/64940#issuecomment-1690694156. To address this issue in MLIR, where we need to generate appropriate lowerings for these cases, we employ regular non-masked intrinsics. However, we modify the input vector using the `arith.select` operation to effectively deactivate undesired elements using a "neutral mask value". The neutral mask value is the smallest possible value for the `fmaximum` reduction and the largest possible value for the `fminimum` reduction. Depends on D158618 Reviewed By: dcaballe Differential Revision: https://reviews.llvm.org/D158773 --- .../VectorToLLVM/ConvertVectorToLLVM.cpp | 63 +++++++++++++++++-- .../vector-reduction-to-llvm.mlir | 30 +++++++++ 2 files changed, 89 insertions(+), 4 deletions(-) diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp index 335e113d12b7e..a979237d1f63e 100644 --- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp +++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp @@ -15,13 +15,17 @@ #include "mlir/Dialect/LLVMIR/FunctionCallUtils.h" #include "mlir/Dialect/LLVMIR/LLVMDialect.h" #include "mlir/Dialect/MemRef/IR/MemRef.h" +#include "mlir/Dialect/Vector/IR/VectorOps.h" #include "mlir/Dialect/Vector/Interfaces/MaskableOpInterface.h" #include "mlir/Dialect/Vector/Transforms/LoweringPatterns.h" #include "mlir/Dialect/Vector/Transforms/VectorTransforms.h" +#include "mlir/IR/BuiltinAttributes.h" +#include "mlir/IR/BuiltinTypeInterfaces.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/TypeUtilities.h" #include "mlir/Target/LLVMIR/TypeToLLVM.h" #include "mlir/Transforms/DialectConversion.h" +#include "llvm/ADT/APFloat.h" #include "llvm/Support/Casting.h" #include @@ -603,6 +607,51 @@ createFPReductionComparisonOpLowering(ConversionPatternRewriter &rewriter, return result; } +/// Reduction neutral classes for overloading +class MaskNeutralFMaximum {}; +class MaskNeutralFMinimum {}; + +/// Get the mask neutral floating point maximum value +static llvm::APFloat +getMaskNeutralValue(MaskNeutralFMaximum, + const llvm::fltSemantics &floatSemantics) { + return llvm::APFloat::getSmallest(floatSemantics, /*Negative=*/true); +} +/// Get the mask neutral floating point minimum 
value +static llvm::APFloat +getMaskNeutralValue(MaskNeutralFMinimum, + const llvm::fltSemantics &floatSemantics) { + return llvm::APFloat::getLargest(floatSemantics, /*Negative=*/false); +} + +/// Create the mask neutral floating point MLIR vector constant +template +static Value createMaskNeutralValue(ConversionPatternRewriter &rewriter, + Location loc, Type llvmType, + Type vectorType) { + const auto &floatSemantics = cast(llvmType).getFloatSemantics(); + auto value = getMaskNeutralValue(MaskNeutral{}, floatSemantics); + auto denseValue = + DenseElementsAttr::get(vectorType.cast(), value); + return rewriter.create(loc, vectorType, denseValue); +} + +/// Lowers masked `fmaximum` and `fminimum` reductions using the non-masked +/// intrinsics. It is a workaround to overcome the lack of masked intrinsics for +/// `fmaximum`/`fminimum`. +/// More information: https://github.com/llvm/llvm-project/issues/64940 +template +static Value lowerMaskedReductionWithRegular( + ConversionPatternRewriter &rewriter, Location loc, Type llvmType, + Value vectorOperand, Value accumulator, Value mask) { + const Value vectorMaskNeutral = createMaskNeutralValue( + rewriter, loc, llvmType, vectorOperand.getType()); + const Value selectedVectorByMask = rewriter.create( + loc, mask, vectorOperand, vectorMaskNeutral); + return createFPReductionComparisonOpLowering( + rewriter, loc, llvmType, selectedVectorByMask, accumulator); +} + /// Overloaded methods to lower a reduction to an llvm instrinsic that requires /// a start value. This start value format spans across fp reductions without /// mask and all the masked reduction intrinsics. @@ -903,10 +952,16 @@ class MaskedReductionOpConversion ReductionNeutralFPMin>( rewriter, loc, llvmType, operand, acc, maskOp.getMask()); break; - default: - return rewriter.notifyMatchFailure( - maskOp, - "lowering to LLVM is not implemented for this masked operation"); + case CombiningKind::MAXIMUMF: + result = lowerMaskedReductionWithRegular( + rewriter, loc, llvmType, operand, acc, maskOp.getMask()); + break; + case CombiningKind::MINIMUMF: + result = lowerMaskedReductionWithRegular( + rewriter, loc, llvmType, operand, acc, maskOp.getMask()); + break; } // Replace `vector.mask` operation altogether. 
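
For reference, a minimal sketch of the lowering implemented above, in the shape checked by the new test cases in the diff below. The `<maximumf>` combining-kind spelling inside the `vector.mask` region is an assumption; the constant value and the select/reduce sequence come from the added CHECK lines.

```mlir
// Input: a masked NaN-propagating float max reduction.
func.func @masked_reduce_maximumf_f32(%arg0: vector<16xf32>, %mask: vector<16xi1>) -> f32 {
  %0 = vector.mask %mask { vector.reduction <maximumf>, %arg0 : vector<16xf32> into f32 } : vector<16xi1> -> f32
  return %0 : f32
}

// Expected lowering: masked-out lanes are replaced by the mask-neutral
// constant produced by getMaskNeutralValue, then the regular (non-masked)
// intrinsic is applied:
//   %neutral = llvm.mlir.constant(dense<-1.401300e-45> : vector<16xf32>) : vector<16xf32>
//   %masked  = llvm.select %mask, %arg0, %neutral : vector<16xi1>, vector<16xf32>
//   %result  = llvm.intr.vector.reduce.fmaximum(%masked) : (vector<16xf32>) -> f32
```

The `fminimum` case mirrors this, selecting the largest finite value (3.40282347E+38 for f32) for masked-out lanes and reducing with `llvm.intr.vector.reduce.fminimum`.
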
diff --git a/mlir/test/Conversion/VectorToLLVM/vector-reduction-to-llvm.mlir b/mlir/test/Conversion/VectorToLLVM/vector-reduction-to-llvm.mlir index c1b1eb05077f2..fd2d6ae5a472f 100644 --- a/mlir/test/Conversion/VectorToLLVM/vector-reduction-to-llvm.mlir +++ b/mlir/test/Conversion/VectorToLLVM/vector-reduction-to-llvm.mlir @@ -101,6 +101,36 @@ func.func @masked_reduce_maxf_f32(%arg0: vector<16xf32>, %mask : vector<16xi1>) // ----- +func.func @masked_reduce_maximumf_f32(%arg0: vector<16xf32>, %mask : vector<16xi1>) -> f32 { + %0 = vector.mask %mask { vector.reduction , %arg0 : vector<16xf32> into f32 } : vector<16xi1> -> f32 + return %0 : f32 +} + +// CHECK-LABEL: func.func @masked_reduce_maximumf_f32( +// CHECK-SAME: %[[INPUT:.*]]: vector<16xf32>, +// CHECK-SAME: %[[MASK:.*]]: vector<16xi1>) -> f32 { +// CHECK: %[[MASK_NEUTRAL:.*]] = llvm.mlir.constant(dense<-1.401300e-45> : vector<16xf32>) : vector<16xf32> +// CHECK: %[[MASKED:.*]] = llvm.select %[[MASK]], %[[INPUT]], %[[MASK_NEUTRAL]] : vector<16xi1>, vector<16xf32> +// CHECK: %[[RESULT:.*]] = llvm.intr.vector.reduce.fmaximum(%[[MASKED]]) : (vector<16xf32>) -> f32 +// CHECK: return %[[RESULT]] + +// ----- + +func.func @masked_reduce_minimumf_f32(%arg0: vector<16xf32>, %mask : vector<16xi1>) -> f32 { + %0 = vector.mask %mask { vector.reduction , %arg0 : vector<16xf32> into f32 } : vector<16xi1> -> f32 + return %0 : f32 +} + +// CHECK-LABEL: func.func @masked_reduce_minimumf_f32( +// CHECK-SAME: %[[INPUT:.*]]: vector<16xf32>, +// CHECK-SAME: %[[MASK:.*]]: vector<16xi1>) -> f32 { +// CHECK: %[[MASK_NEUTRAL:.*]] = llvm.mlir.constant(dense<3.40282347E+38> : vector<16xf32>) : vector<16xf32> +// CHECK: %[[MASKED:.*]] = llvm.select %[[MASK]], %[[INPUT]], %[[MASK_NEUTRAL]] : vector<16xi1>, vector<16xf32> +// CHECK: %[[RESULT:.*]] = llvm.intr.vector.reduce.fminimum(%[[MASKED]]) : (vector<16xf32>) -> f32 +// CHECK: return %[[RESULT]] + +// ----- + func.func @masked_reduce_add_i8(%arg0: vector<32xi8>, %mask : vector<32xi1>) -> i8 { %0 = vector.mask %mask { vector.reduction , %arg0 : vector<32xi8> into i8 } : vector<32xi1> -> i8 return %0 : i8 From c60ee7c7b2154013e2b7b4f83ea422fbb4015cb7 Mon Sep 17 00:00:00 2001 From: Jan Svoboda Date: Wed, 13 Sep 2023 15:51:24 -0700 Subject: [PATCH 11/22] [clang][deps] Implement move-conversion for `CowCompilerInvocation` (#66301) This avoids making copies at the end of `makeCommonInvocationForModuleBuild()` (in `ModuleDepCollector.cpp`). --- clang/include/clang/Frontend/CompilerInvocation.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/clang/include/clang/Frontend/CompilerInvocation.h b/clang/include/clang/Frontend/CompilerInvocation.h index b79a1a53c1501..45e263e7bc768 100644 --- a/clang/include/clang/Frontend/CompilerInvocation.h +++ b/clang/include/clang/Frontend/CompilerInvocation.h @@ -354,6 +354,9 @@ class CowCompilerInvocation : public CompilerInvocationBase { deep_copy_assign(X); } + CowCompilerInvocation(CompilerInvocation &&X) + : CompilerInvocationBase(std::move(X)) {} + // Const getters are inherited from the base class. /// Mutable getters. From 0f65df732ca2b124f473179eda801abf4496a350 Mon Sep 17 00:00:00 2001 From: Aart Bik <39774503+aartbik@users.noreply.github.com> Date: Wed, 13 Sep 2023 15:54:49 -0700 Subject: [PATCH 12/22] [mlir][sparse] remove the MLIR PyTACO tests (#66302) Rationale: This test was really fun to compare the MLIR sparsifier with TACO using the PyTACO format. However, the underlying mechanism is rapidly growing outdated with our recent developments. 
Rather than maintaining the old code, we are moving toward the newer, better approaches. So if you are sad this is gone, stay tuned, something better is coming! --- .../Dialect/SparseTensor/taco/README.md | 27 - .../Dialect/SparseTensor/taco/data/A.mtx | 11 - .../Dialect/SparseTensor/taco/data/B.mtx | 11 - .../Dialect/SparseTensor/taco/data/gold_A.tns | 53 - .../Dialect/SparseTensor/taco/data/gold_C.tns | 12 - .../Dialect/SparseTensor/taco/data/gold_y.tns | 6 - .../Dialect/SparseTensor/taco/data/nell-2.tns | 10 - .../Dialect/SparseTensor/taco/data/pwtk.mtx | 14 - .../Dialect/SparseTensor/taco/lit.local.cfg | 5 - .../Dialect/SparseTensor/taco/test_MTTKRP.py | 55 - .../Dialect/SparseTensor/taco/test_SDDMM.py | 58 - .../Dialect/SparseTensor/taco/test_SpMM.py | 35 - .../Dialect/SparseTensor/taco/test_SpMV.py | 56 - .../Dialect/SparseTensor/taco/test_Tensor.py | 99 - .../taco/test_scalar_tensor_algebra.py | 36 - .../taco/test_simple_tensor_algebra.py | 61 - .../SparseTensor/taco/test_tensor_complex.py | 31 - .../SparseTensor/taco/test_tensor_types.py | 33 - .../taco/test_tensor_unary_ops.py | 40 - .../taco/test_true_dense_tensor_algebra.py | 22 - .../SparseTensor/taco/tools/lit.local.cfg | 2 - .../SparseTensor/taco/tools/mlir_pytaco.py | 2279 ----------------- .../taco/tools/mlir_pytaco_api.py | 53 - .../SparseTensor/taco/tools/mlir_pytaco_io.py | 82 - .../taco/tools/mlir_pytaco_utils.py | 424 --- .../taco/tools/mlir_sparse_compiler.py | 41 - .../SparseTensor/taco/tools/testing_utils.py | 47 - .../taco/unit_test_tensor_core.py | 647 ----- .../SparseTensor/taco/unit_test_tensor_io.py | 116 - .../taco/unit_test_tensor_utils.py | 135 - 30 files changed, 4501 deletions(-) delete mode 100644 mlir/test/Integration/Dialect/SparseTensor/taco/README.md delete mode 100644 mlir/test/Integration/Dialect/SparseTensor/taco/data/A.mtx delete mode 100644 mlir/test/Integration/Dialect/SparseTensor/taco/data/B.mtx delete mode 100644 mlir/test/Integration/Dialect/SparseTensor/taco/data/gold_A.tns delete mode 100644 mlir/test/Integration/Dialect/SparseTensor/taco/data/gold_C.tns delete mode 100644 mlir/test/Integration/Dialect/SparseTensor/taco/data/gold_y.tns delete mode 100644 mlir/test/Integration/Dialect/SparseTensor/taco/data/nell-2.tns delete mode 100644 mlir/test/Integration/Dialect/SparseTensor/taco/data/pwtk.mtx delete mode 100644 mlir/test/Integration/Dialect/SparseTensor/taco/lit.local.cfg delete mode 100644 mlir/test/Integration/Dialect/SparseTensor/taco/test_MTTKRP.py delete mode 100644 mlir/test/Integration/Dialect/SparseTensor/taco/test_SDDMM.py delete mode 100644 mlir/test/Integration/Dialect/SparseTensor/taco/test_SpMM.py delete mode 100644 mlir/test/Integration/Dialect/SparseTensor/taco/test_SpMV.py delete mode 100644 mlir/test/Integration/Dialect/SparseTensor/taco/test_Tensor.py delete mode 100644 mlir/test/Integration/Dialect/SparseTensor/taco/test_scalar_tensor_algebra.py delete mode 100644 mlir/test/Integration/Dialect/SparseTensor/taco/test_simple_tensor_algebra.py delete mode 100644 mlir/test/Integration/Dialect/SparseTensor/taco/test_tensor_complex.py delete mode 100644 mlir/test/Integration/Dialect/SparseTensor/taco/test_tensor_types.py delete mode 100644 mlir/test/Integration/Dialect/SparseTensor/taco/test_tensor_unary_ops.py delete mode 100644 mlir/test/Integration/Dialect/SparseTensor/taco/test_true_dense_tensor_algebra.py delete mode 100644 mlir/test/Integration/Dialect/SparseTensor/taco/tools/lit.local.cfg delete mode 100644 
mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco.py delete mode 100644 mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco_api.py delete mode 100644 mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco_io.py delete mode 100644 mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco_utils.py delete mode 100644 mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_sparse_compiler.py delete mode 100644 mlir/test/Integration/Dialect/SparseTensor/taco/tools/testing_utils.py delete mode 100644 mlir/test/Integration/Dialect/SparseTensor/taco/unit_test_tensor_core.py delete mode 100644 mlir/test/Integration/Dialect/SparseTensor/taco/unit_test_tensor_io.py delete mode 100644 mlir/test/Integration/Dialect/SparseTensor/taco/unit_test_tensor_utils.py diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/README.md b/mlir/test/Integration/Dialect/SparseTensor/taco/README.md deleted file mode 100644 index 88a8ce2581962..0000000000000 --- a/mlir/test/Integration/Dialect/SparseTensor/taco/README.md +++ /dev/null @@ -1,27 +0,0 @@ -# MLIR-PyTACO: Implementing PyTACO with MLIR - -TACO (http://tensor-compiler.org/) is a tensor algebra compiler. TACO defines -PyTACO, a domain specific language in Python, for writing tensor algebra -applications. - -This directory contains the implementation of PyTACO using MLIR. In particular, -we implement a Python layer that accepts the PyTACO language, generates MLIR -linalg.generic OPs with sparse tensor annotation to represent the tensor -computation, and invokes the MLIR sparse tensor code generator -(https://mlir.llvm.org/docs/Dialects/SparseTensorOps/) as well as other MLIR -compilation passes to generate an executable. Then, we invoke the MLIR execution -engine to execute the program and pass the result back to the Python layer. 
- -As can be seen from the tests in this directory, in order to port a PyTACO -program to MLIR-PyTACO, we basically only need to replace this line that imports -PyTACO: - -```python -import pytaco as pt -``` - -with this line to import MLIR-PyTACO: - -```python -from tools import mlir_pytaco_api as pt -``` diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/data/A.mtx b/mlir/test/Integration/Dialect/SparseTensor/taco/data/A.mtx deleted file mode 100644 index 6ea0893af6164..0000000000000 --- a/mlir/test/Integration/Dialect/SparseTensor/taco/data/A.mtx +++ /dev/null @@ -1,11 +0,0 @@ -%%MatrixMarket matrix coordinate real general -3 3 9 -1 1 1.0 -1 2 2.0 -1 3 4.0 -2 1 4.0 -2 2 5.0 -2 3 6.0 -3 1 7.0 -3 2 8.0 -3 3 9.0 diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/data/B.mtx b/mlir/test/Integration/Dialect/SparseTensor/taco/data/B.mtx deleted file mode 100644 index 9bb604d44c7c9..0000000000000 --- a/mlir/test/Integration/Dialect/SparseTensor/taco/data/B.mtx +++ /dev/null @@ -1,11 +0,0 @@ -%%MatrixMarket matrix coordinate real general -3 3 9 -1 1 10.0 -1 2 11.0 -1 3 12.0 -2 1 13.0 -2 2 14.0 -2 3 15.0 -3 1 16.0 -3 2 17.0 -3 3 18.0 diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/data/gold_A.tns b/mlir/test/Integration/Dialect/SparseTensor/taco/data/gold_A.tns deleted file mode 100644 index f06646b51feca..0000000000000 --- a/mlir/test/Integration/Dialect/SparseTensor/taco/data/gold_A.tns +++ /dev/null @@ -1,53 +0,0 @@ -# See http://frostt.io/tensors/file-formats.html for FROSTT (.tns) format -2 50 -2 25 -1 1 12 -1 2 12 -1 3 12 -1 4 12 -1 5 12 -1 6 12 -1 7 12 -1 8 12 -1 9 12 -1 10 12 -1 11 12 -1 12 12 -1 13 12 -1 14 12 -1 15 12 -1 16 12 -1 17 12 -1 18 12 -1 19 12 -1 20 12 -1 21 12 -1 22 12 -1 23 12 -1 24 12 -1 25 12 -2 1 6 -2 2 6 -2 3 6 -2 4 6 -2 5 6 -2 6 6 -2 7 6 -2 8 6 -2 9 6 -2 10 6 -2 11 6 -2 12 6 -2 13 6 -2 14 6 -2 15 6 -2 16 6 -2 17 6 -2 18 6 -2 19 6 -2 20 6 -2 21 6 -2 22 6 -2 23 6 -2 24 6 -2 25 6 diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/data/gold_C.tns b/mlir/test/Integration/Dialect/SparseTensor/taco/data/gold_C.tns deleted file mode 100644 index 61bec5dece951..0000000000000 --- a/mlir/test/Integration/Dialect/SparseTensor/taco/data/gold_C.tns +++ /dev/null @@ -1,12 +0,0 @@ -# See http://frostt.io/tensors/file-formats.html for FROSTT (.tns) format -2 9 -3 3 -1 1 100 -1 2 107 -1 3 114 -2 1 201 -2 2 216 -2 3 231 -3 1 318 -3 2 342 -3 3 366 diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/data/gold_y.tns b/mlir/test/Integration/Dialect/SparseTensor/taco/data/gold_y.tns deleted file mode 100644 index 832cb1795aaaa..0000000000000 --- a/mlir/test/Integration/Dialect/SparseTensor/taco/data/gold_y.tns +++ /dev/null @@ -1,6 +0,0 @@ -# See http://frostt.io/tensors/file-formats.html for FROSTT (.tns) format -1 3 -3 -1 37102 -2 -20.4138 -3 804927 diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/data/nell-2.tns b/mlir/test/Integration/Dialect/SparseTensor/taco/data/nell-2.tns deleted file mode 100644 index b82ce864820fb..0000000000000 --- a/mlir/test/Integration/Dialect/SparseTensor/taco/data/nell-2.tns +++ /dev/null @@ -1,10 +0,0 @@ -# Extended FROSTT format: -# rank number-non-zero-elements -# dimension-sizes -3 5 -2 4 4 -1 1 1 1.0 -1 2 2 2.0 -1 3 4 3.0 -2 1 1 1.0 -2 4 3 2.0 diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/data/pwtk.mtx b/mlir/test/Integration/Dialect/SparseTensor/taco/data/pwtk.mtx deleted file mode 100644 index c207a8681c4e9..0000000000000 --- 
a/mlir/test/Integration/Dialect/SparseTensor/taco/data/pwtk.mtx +++ /dev/null @@ -1,14 +0,0 @@ -%%MatrixMarket matrix coordinate real general -%------------------------------------------------------------------------------- -% To download a matrix for a real world application -% https://math.nist.gov/MatrixMarket/ -%------------------------------------------------------------------------------- -3 3 8 -1 1 37423.0879671 -1 2 -22.4050781162 -1 3 -300.654980157 -2 1 -22.4050781162 -2 3 -.00869762944058 -3 1 -300.654980157 -3 2 -.00869762944058 -3 3 805225.750212 diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/lit.local.cfg b/mlir/test/Integration/Dialect/SparseTensor/taco/lit.local.cfg deleted file mode 100644 index f1bbcf486bc27..0000000000000 --- a/mlir/test/Integration/Dialect/SparseTensor/taco/lit.local.cfg +++ /dev/null @@ -1,5 +0,0 @@ -# Disable ASAN's leak detection for python taco tests. -config.environment["ASAN_OPTIONS"] = "detect_leaks=0" -# Only run when python bindings are enabled. -if not config.enable_bindings_python: - config.unsupported = True diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/test_MTTKRP.py b/mlir/test/Integration/Dialect/SparseTensor/taco/test_MTTKRP.py deleted file mode 100644 index 2d558f8d6ddff..0000000000000 --- a/mlir/test/Integration/Dialect/SparseTensor/taco/test_MTTKRP.py +++ /dev/null @@ -1,55 +0,0 @@ -# RUN: env SUPPORTLIB=%mlir_c_runner_utils %PYTHON %s | FileCheck %s - -import numpy as np -import os -import sys -import tempfile - -_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__)) -sys.path.append(_SCRIPT_PATH) - -from tools import mlir_pytaco_api as pt -from tools import testing_utils as utils - -###### This PyTACO part is taken from the TACO open-source project. ###### -# See http://tensor-compiler.org/docs/data_analytics/index.html. - -compressed = pt.compressed -dense = pt.dense - -# Define formats for storing the sparse tensor and dense matrices. -csf = pt.format([compressed, compressed, compressed]) -rm = pt.format([dense, dense]) - -# Load a sparse three-dimensional tensor from file (stored in the FROSTT -# format) and store it as a compressed sparse fiber tensor. We use a small -# tensor for the purpose of testing. To run the program using the data from -# the real application, please download the data from: -# http://frostt.io/tensors/nell-2/ -B = pt.read(os.path.join(_SCRIPT_PATH, "data/nell-2.tns"), csf) - -# These two lines have been modified from the original program to use static -# data to support result comparison. -C = pt.from_array(np.full((B.shape[1], 25), 1, dtype=np.float32)) -D = pt.from_array(np.full((B.shape[2], 25), 2, dtype=np.float32)) - -# Declare the result to be a dense matrix. -A = pt.tensor([B.shape[0], 25], rm) - -# Declare index vars. -i, j, k, l = pt.get_index_vars(4) - -# Define the MTTKRP computation. -A[i, j] = B[i, k, l] * D[l, j] * C[k, j] - -########################################################################## - -# Perform the MTTKRP computation and write the result to file. 
-with tempfile.TemporaryDirectory() as test_dir: - golden_file = os.path.join(_SCRIPT_PATH, "data/gold_A.tns") - out_file = os.path.join(test_dir, "A.tns") - pt.write(out_file, A) - # - # CHECK: Compare result True - # - print(f"Compare result {utils.compare_sparse_tns(golden_file, out_file)}") diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/test_SDDMM.py b/mlir/test/Integration/Dialect/SparseTensor/taco/test_SDDMM.py deleted file mode 100644 index ef94ea9900fe4..0000000000000 --- a/mlir/test/Integration/Dialect/SparseTensor/taco/test_SDDMM.py +++ /dev/null @@ -1,58 +0,0 @@ -# RUN: env SUPPORTLIB=%mlir_c_runner_utils %PYTHON %s | FileCheck %s - -import filecmp -import numpy as np -import os -import sys -import tempfile - -_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__)) -sys.path.append(_SCRIPT_PATH) - -from tools import mlir_pytaco_api as pt -from tools import testing_utils as utils - -i, j, k = pt.get_index_vars(3) - -# Set up dense matrices. -A = pt.from_array(np.full((8, 8), 2.0, dtype=np.float32)) -B = pt.from_array(np.full((8, 8), 3.0, dtype=np.float32)) - -# Set up sparse matrices. -S = pt.tensor([8, 8], pt.format([pt.compressed, pt.compressed])) -X = pt.tensor([8, 8], pt.format([pt.compressed, pt.compressed])) -Y = pt.tensor([8, 8], pt.compressed) # alternative syntax works too - -S.insert([0, 7], 42.0) - -# Define the SDDMM kernel. Since this performs the reduction as -# sum(k, S[i, j] * A[i, k] * B[k, j]) -# we only compute the intermediate dense matrix product that are actually -# needed to compute the result, with proper asymptotic complexity. -X[i, j] = S[i, j] * A[i, k] * B[k, j] - -# Alternative way to define SDDMM kernel. Since this performs the reduction as -# sum(k, A[i, k] * B[k, j]) * S[i, j] -# the MLIR lowering results in two separate tensor index expressions that are -# fused prior to running the sparse compiler in order to guarantee proper -# asymptotic complexity. -Y[i, j] = A[i, k] * B[k, j] * S[i, j] - -expected = """; extended FROSTT format -2 1 -8 8 -1 8 2016 -""" - -# Force evaluation of the kernels by writing out X and Y. -with tempfile.TemporaryDirectory() as test_dir: - x_file = os.path.join(test_dir, "X.tns") - y_file = os.path.join(test_dir, "Y.tns") - pt.write(x_file, X) - pt.write(y_file, Y) - # - # CHECK: Compare result True True - # - x_data = utils.file_as_string(x_file) - y_data = utils.file_as_string(y_file) - print(f"Compare result {x_data == expected} {y_data == expected}") diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/test_SpMM.py b/mlir/test/Integration/Dialect/SparseTensor/taco/test_SpMM.py deleted file mode 100644 index 02bbbc096e7a3..0000000000000 --- a/mlir/test/Integration/Dialect/SparseTensor/taco/test_SpMM.py +++ /dev/null @@ -1,35 +0,0 @@ -# RUN: env SUPPORTLIB=%mlir_c_runner_utils %PYTHON %s | FileCheck %s - -import filecmp -import numpy as np -import os -import sys -import tempfile - -_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__)) -sys.path.append(_SCRIPT_PATH) - -from tools import mlir_pytaco_api as pt -from tools import testing_utils as utils - -# Define the CSR format. -csr = pt.format([pt.dense, pt.compressed], [0, 1]) - -# Read matrices A and B from file, infer size of output matrix C. -A = pt.read(os.path.join(_SCRIPT_PATH, "data/A.mtx"), csr) -B = pt.read(os.path.join(_SCRIPT_PATH, "data/B.mtx"), csr) -C = pt.tensor([A.shape[0], B.shape[1]], csr) - -# Define the kernel. 
-i, j, k = pt.get_index_vars(3) -C[i, j] = A[i, k] * B[k, j] - -# Force evaluation of the kernel by writing out C. -with tempfile.TemporaryDirectory() as test_dir: - golden_file = os.path.join(_SCRIPT_PATH, "data/gold_C.tns") - out_file = os.path.join(test_dir, "C.tns") - pt.write(out_file, C) - # - # CHECK: Compare result True - # - print(f"Compare result {utils.compare_sparse_tns(golden_file, out_file)}") diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/test_SpMV.py b/mlir/test/Integration/Dialect/SparseTensor/taco/test_SpMV.py deleted file mode 100644 index 2038a473ae530..0000000000000 --- a/mlir/test/Integration/Dialect/SparseTensor/taco/test_SpMV.py +++ /dev/null @@ -1,56 +0,0 @@ -# RUN: env SUPPORTLIB=%mlir_c_runner_utils %PYTHON %s | FileCheck %s - -import numpy as np -import os -import sys -import tempfile - -_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__)) -sys.path.append(_SCRIPT_PATH) - -from tools import mlir_pytaco_api as pt -from tools import testing_utils as utils - -###### This PyTACO part is taken from the TACO open-source project. ###### -# See http://tensor-compiler.org/docs/scientific_computing/index.html. - -compressed = pt.compressed -dense = pt.dense - -# Define formats for storing the sparse matrix and dense vectors. -csr = pt.format([dense, compressed]) -dv = pt.format([dense]) - -# Load a sparse matrix stored in the matrix market format) and store it -# as a CSR matrix. The matrix in this test is a reduced version of the data -# downloaded from here: -# https://www.cise.ufl.edu/research/sparse/MM/Boeing/pwtk.tar.gz -# In order to run the program using the matrix above, you can download the -# matrix and replace this path to the actual path to the file. -A = pt.read(os.path.join(_SCRIPT_PATH, "data/pwtk.mtx"), csr) - -# These two lines have been modified from the original program to use static -# data to support result comparison. -x = pt.from_array(np.full((A.shape[1],), 1, dtype=np.float32)) -z = pt.from_array(np.full((A.shape[0],), 2, dtype=np.float32)) - -# Declare the result to be a dense vector -y = pt.tensor([A.shape[0]], dv) - -# Declare index vars -i, j = pt.get_index_vars(2) - -# Define the SpMV computation -y[i] = A[i, j] * x[j] + z[i] - -########################################################################## - -# Perform the SpMV computation and write the result to file -with tempfile.TemporaryDirectory() as test_dir: - golden_file = os.path.join(_SCRIPT_PATH, "data/gold_y.tns") - out_file = os.path.join(test_dir, "y.tns") - pt.write(out_file, y) - # - # CHECK: Compare result True - # - print(f"Compare result {utils.compare_sparse_tns(golden_file, out_file)}") diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/test_Tensor.py b/mlir/test/Integration/Dialect/SparseTensor/taco/test_Tensor.py deleted file mode 100644 index cd24e0dbb0a43..0000000000000 --- a/mlir/test/Integration/Dialect/SparseTensor/taco/test_Tensor.py +++ /dev/null @@ -1,99 +0,0 @@ -# RUN: env SUPPORTLIB=%mlir_c_runner_utils %PYTHON %s | FileCheck %s - -import filecmp -import numpy as np -import os -import sys -import tempfile - -_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__)) -sys.path.append(_SCRIPT_PATH) - -from tools import mlir_pytaco_api as pt -from tools import testing_utils as utils - -i, j, k, l, m = pt.get_index_vars(5) - -# Set up scalar. -alpha = pt.tensor(42.0) - -# Set up some sparse tensors with different dim annotations and ordering. 
-S = pt.tensor([8, 8, 8], pt.format([pt.compressed, pt.dense, pt.compressed], [1, 0, 2])) -X = pt.tensor( - [8, 8, 8], pt.format([pt.compressed, pt.compressed, pt.compressed], [1, 0, 2]) -) -S.insert([0, 0, 0], 2.0) -S.insert([1, 1, 1], 3.0) -S.insert([4, 4, 4], 4.0) -S.insert([7, 7, 7], 5.0) - -X[i, j, k] = alpha[0] * S[i, j, k] - -# Set up tensors with a dense last dimension. This results in a full -# enveloping storage of all last "rows" with one or more nonzeros. -T = pt.tensor( - [1, 2, 3, 4, 5], - pt.format([pt.compressed, pt.compressed, pt.compressed, pt.compressed, pt.dense]), -) -Y = pt.tensor( - [1, 2, 3, 4, 5], - pt.format([pt.compressed, pt.compressed, pt.compressed, pt.compressed, pt.dense]), -) -T.insert([0, 1, 2, 3, 4], -2.0) - -Y[i, j, k, l, m] = alpha[0] * T[i, j, k, l, m] - -# Set up a sparse tensor and dense tensor with different access. -U = pt.tensor([2, 3], pt.format([pt.compressed, pt.compressed], [1, 0])) -Z = pt.tensor([3, 2], pt.format([pt.dense, pt.dense])) -U.insert([1, 2], 3.0) - -Z[i, j] = alpha[0] * U[j, i] - -x_expected = """; extended FROSTT format -3 4 -8 8 8 -1 1 1 84 -2 2 2 126 -5 5 5 168 -8 8 8 210 -""" - -y_expected = """; extended FROSTT format -5 5 -1 2 3 4 5 -1 2 3 4 1 0 -1 2 3 4 2 0 -1 2 3 4 3 0 -1 2 3 4 4 0 -1 2 3 4 5 -84 -""" - -z_expected = """; extended FROSTT format -2 6 -3 2 -1 1 0 -1 2 0 -2 1 0 -2 2 0 -3 1 0 -3 2 126 -""" - -# Force evaluation of the kernel by writing out X. -with tempfile.TemporaryDirectory() as test_dir: - x_file = os.path.join(test_dir, "X.tns") - pt.write(x_file, X) - y_file = os.path.join(test_dir, "Y.tns") - pt.write(y_file, Y) - z_file = os.path.join(test_dir, "Z.tns") - pt.write(z_file, Z) - # - # CHECK: Compare result True True True - # - x_data = utils.file_as_string(x_file) - y_data = utils.file_as_string(y_file) - z_data = utils.file_as_string(z_file) - print( - f"Compare result {x_data == x_expected} {y_data == y_expected} {z_data == z_expected}" - ) diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/test_scalar_tensor_algebra.py b/mlir/test/Integration/Dialect/SparseTensor/taco/test_scalar_tensor_algebra.py deleted file mode 100644 index 206ffa9316d48..0000000000000 --- a/mlir/test/Integration/Dialect/SparseTensor/taco/test_scalar_tensor_algebra.py +++ /dev/null @@ -1,36 +0,0 @@ -# RUN: env SUPPORTLIB=%mlir_c_runner_utils %PYTHON %s | FileCheck %s - -import numpy as np -import os -import sys - -_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__)) -sys.path.append(_SCRIPT_PATH) -from tools import mlir_pytaco_api as pt - -compressed = pt.compressed - -i, j = pt.get_index_vars(2) -A = pt.tensor([2, 3]) -S = pt.tensor(3) # S is a scalar tensor. -B = pt.tensor([2, 3], compressed) -A.insert([0, 1], 10) -A.insert([1, 2], 40) - -# Use [0] to index the scalar tensor. -B[i, j] = A[i, j] * S[0] - -indices, values = B.get_coordinates_and_values() -passed = np.array_equal(indices, [[0, 1], [1, 2]]) -passed += np.array_equal(values, [30.0, 120.0]) - -# Sum all the values in A. 
-S[0] = A[i, j] -passed += S.get_scalar_value() == 50.0 - -indices, values = S.get_coordinates_and_values() -passed += len(indices) == 0 -passed += values == 50.0 - -# CHECK: Number of passed: 5 -print("Number of passed:", passed) diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/test_simple_tensor_algebra.py b/mlir/test/Integration/Dialect/SparseTensor/taco/test_simple_tensor_algebra.py deleted file mode 100644 index 7ceb8585a7e0c..0000000000000 --- a/mlir/test/Integration/Dialect/SparseTensor/taco/test_simple_tensor_algebra.py +++ /dev/null @@ -1,61 +0,0 @@ -# RUN: env SUPPORTLIB=%mlir_c_runner_utils %PYTHON %s | FileCheck %s - -import numpy as np -import os -import sys - -_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__)) -sys.path.append(_SCRIPT_PATH) -from tools import mlir_pytaco_api as pt - -compressed = pt.compressed -dense = pt.dense - -# Ensure that we can run an unmodified PyTACO program with a simple tensor -# algebra expression using tensor index notation, and produce the expected -# result. -i, j = pt.get_index_vars(2) -A = pt.tensor([2, 3]) -B = pt.tensor([2, 3]) -C = pt.tensor([2, 3]) -D = pt.tensor([2, 3], compressed) -A.insert([0, 1], 10) -A.insert([1, 2], 40) -B.insert([0, 0], 20) -B.insert([1, 2], 30) -C.insert([0, 1], 5) -C.insert([1, 2], 7) -D[i, j] = A[i, j] + B[i, j] - C[i, j] - -indices, values = D.get_coordinates_and_values() -passed = np.array_equal(indices, [[0, 0], [0, 1], [1, 2]]) -passed += np.allclose(values, [20.0, 5.0, 63.0]) - -# PyTACO doesn't allow the use of index values, but MLIR-PyTACO removes this -# restriction. -E = pt.tensor([3]) -E[i] = i -indices, values = E.get_coordinates_and_values() -passed += np.array_equal(indices, [[0], [1], [2]]) -passed += np.allclose(values, [0.0, 1.0, 2.0]) - -F = pt.tensor([3]) -G = pt.tensor([3]) -F.insert([0], 10) -F.insert([2], 40) -G[i] = F[i] + i -indices, values = G.get_coordinates_and_values() -passed += np.array_equal(indices, [[0], [1], [2]]) -passed += np.allclose(values, [10.0, 1.0, 42.0]) - -H = pt.tensor([3]) -I = pt.tensor([3]) -H.insert([0], 10) -H.insert([2], 40) -I[i] = H[i] * i -indices, values = I.get_coordinates_and_values() -passed += np.array_equal(indices, [[0], [2]]) -passed += np.allclose(values, [0.0, 80.0]) - -# CHECK: Number of passed: 8 -print("Number of passed:", passed) diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/test_tensor_complex.py b/mlir/test/Integration/Dialect/SparseTensor/taco/test_tensor_complex.py deleted file mode 100644 index b0fed50f8b5db..0000000000000 --- a/mlir/test/Integration/Dialect/SparseTensor/taco/test_tensor_complex.py +++ /dev/null @@ -1,31 +0,0 @@ -# RUN: env SUPPORTLIB=%mlir_c_runner_utils %PYTHON %s | FileCheck %s -import numpy as np -import os -import sys - -_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__)) -sys.path.append(_SCRIPT_PATH) -from tools import mlir_pytaco_api as pt - -compressed = pt.compressed - -passed = 0 -all_types = [pt.complex64, pt.complex128] -for t in all_types: - i, j = pt.get_index_vars(2) - A = pt.tensor([2, 3], dtype=t) - B = pt.tensor([2, 3], dtype=t) - C = pt.tensor([2, 3], compressed, dtype=t) - A.insert([0, 1], 10 + 20j) - A.insert([1, 2], 40 + 0.5j) - B.insert([0, 0], 20) - B.insert([1, 2], 30 + 15j) - C[i, j] = A[i, j] + B[i, j] - - indices, values = C.get_coordinates_and_values() - passed += isinstance(values[0], t.value) - passed += np.array_equal(indices, [[0, 0], [0, 1], [1, 2]]) - passed += np.allclose(values, [20, 10 + 20j, 70 + 15.5j]) - -# CHECK: Number of passed: 6 
-print("Number of passed:", passed) diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/test_tensor_types.py b/mlir/test/Integration/Dialect/SparseTensor/taco/test_tensor_types.py deleted file mode 100644 index 4ba2836dd4616..0000000000000 --- a/mlir/test/Integration/Dialect/SparseTensor/taco/test_tensor_types.py +++ /dev/null @@ -1,33 +0,0 @@ -# RUN: env SUPPORTLIB=%mlir_c_runner_utils %PYTHON %s | FileCheck %s - -import numpy as np -import os -import sys - -_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__)) -sys.path.append(_SCRIPT_PATH) -from tools import mlir_pytaco_api as pt - -compressed = pt.compressed -dense = pt.dense - -passed = 0 -all_types = [pt.int8, pt.int16, pt.int32, pt.int64, pt.float16, pt.float32, pt.float64] -for t in all_types: - i, j = pt.get_index_vars(2) - A = pt.tensor([2, 3], dtype=t) - B = pt.tensor([2, 3], dtype=t) - C = pt.tensor([2, 3], compressed, dtype=t) - A.insert([0, 1], 10) - A.insert([1, 2], 40) - B.insert([0, 0], 20) - B.insert([1, 2], 30) - C[i, j] = A[i, j] + B[i, j] - - indices, values = C.get_coordinates_and_values() - passed += isinstance(values[0], t.value) - passed += np.array_equal(indices, [[0, 0], [0, 1], [1, 2]]) - passed += np.allclose(values, [20.0, 10.0, 70.0]) - -# CHECK: Number of passed: 21 -print("Number of passed:", passed) diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/test_tensor_unary_ops.py b/mlir/test/Integration/Dialect/SparseTensor/taco/test_tensor_unary_ops.py deleted file mode 100644 index 6fcb41e30eea3..0000000000000 --- a/mlir/test/Integration/Dialect/SparseTensor/taco/test_tensor_unary_ops.py +++ /dev/null @@ -1,40 +0,0 @@ -# RUN: env SUPPORTLIB=%mlir_c_runner_utils %PYTHON %s | FileCheck %s - -import numpy as np -import os -import sys - -_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__)) -sys.path.append(_SCRIPT_PATH) -from tools import mlir_pytaco_api as pt - -i, j = pt.get_index_vars(2) -A = pt.tensor([2, 3]) -B = pt.tensor([2, 3]) -A.insert([0, 1], 10.3) -A.insert([1, 1], 40.7) -A.insert([0, 2], -11.3) -A.insert([1, 2], -41.7) - -B[i, j] = abs(A[i, j]) -indices, values = B.get_coordinates_and_values() -passed = np.array_equal(indices, [[0, 1], [0, 2], [1, 1], [1, 2]]) -passed += np.allclose(values, [10.3, 11.3, 40.7, 41.7]) - -B[i, j] = pt.ceil(A[i, j]) -indices, values = B.get_coordinates_and_values() -passed += np.array_equal(indices, [[0, 1], [0, 2], [1, 1], [1, 2]]) -passed += np.allclose(values, [11, -11, 41, -41]) - -B[i, j] = pt.floor(A[i, j]) -indices, values = B.get_coordinates_and_values() -passed += np.array_equal(indices, [[0, 1], [0, 2], [1, 1], [1, 2]]) -passed += np.allclose(values, [10, -12, 40, -42]) - -B[i, j] = -A[i, j] -indices, values = B.get_coordinates_and_values() -passed += np.array_equal(indices, [[0, 1], [0, 2], [1, 1], [1, 2]]) -passed += np.allclose(values, [-10.3, 11.3, -40.7, 41.7]) - -# CHECK: Number of passed: 8 -print("Number of passed:", passed) diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/test_true_dense_tensor_algebra.py b/mlir/test/Integration/Dialect/SparseTensor/taco/test_true_dense_tensor_algebra.py deleted file mode 100644 index 78bce344e3b6f..0000000000000 --- a/mlir/test/Integration/Dialect/SparseTensor/taco/test_true_dense_tensor_algebra.py +++ /dev/null @@ -1,22 +0,0 @@ -# RUN: env SUPPORTLIB=%mlir_c_runner_utils %PYTHON %s | FileCheck %s - -import numpy as np -import os -import sys - -_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__)) -sys.path.append(_SCRIPT_PATH) -from tools import mlir_pytaco_api as pt - -i, j 
= pt.get_index_vars(2) -# Both tensors are true dense tensors. -A = pt.from_array(np.full([2, 3], 1, dtype=np.float64)) -B = pt.from_array(np.full([2, 3], 2, dtype=np.float64)) -# Define the result tensor as a true dense tensor. The parameter is_dense=True -# is an MLIR-PyTACO extension. -C = pt.tensor([2, 3], dtype=pt.float64, is_dense=True) - -C[i, j] = A[i, j] + B[i, j] - -# CHECK: [3. 3. 3. 3. 3. 3.] -print(C.to_array().reshape(6)) diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/tools/lit.local.cfg b/mlir/test/Integration/Dialect/SparseTensor/taco/tools/lit.local.cfg deleted file mode 100644 index 650ca33613cc6..0000000000000 --- a/mlir/test/Integration/Dialect/SparseTensor/taco/tools/lit.local.cfg +++ /dev/null @@ -1,2 +0,0 @@ -# Files in this directory are tools, not tests. -config.unsupported = True diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco.py b/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco.py deleted file mode 100644 index c8cb77086ea34..0000000000000 --- a/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco.py +++ /dev/null @@ -1,2279 +0,0 @@ -# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - -"""Experimental MLIR-PyTACO with sparse tensor support. - -See http://tensor-compiler.org/ for TACO tensor compiler. - -This module implements the Python classes for PyTACO index notation. These -include classes for data types, tensor dimension formats (aka mode formats), -tensor dimension orderings (aka mode ordering), tensor storage formats, and -tensors. - -The PyTACO API doesn't follow the naming conversion required by the style guide -for this module. As such, we first implement the supporting classes and routines -following the style guide, and then define the type aliases and constants to -support the PyTACO API in the pytaco_api module. -""" - -from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Union - -import abc -import ctypes -import dataclasses -import enum -import numpy as np -import functools -import operator -import os -import threading - -# Import MLIR related modules. -from mlir import execution_engine -from mlir import ir -from mlir import runtime -from mlir.dialects import arith -from mlir.dialects import bufferization -from mlir.dialects import builtin -from mlir.dialects import func -from mlir.dialects import linalg -from mlir.dialects import sparse_tensor -from mlir.dialects import tensor -from mlir.dialects.linalg.opdsl import lang - -from . import mlir_pytaco_utils as utils - -# TACO naming prefixes. -_TACO_INDEX_PREFIX = "i" -_TACO_TENSOR_PREFIX = "A" - -# Bitwidths for positions and coordinates. -_POS_WIDTH = 0 -_CRD_WIDTH = 0 -# The entry point to the JIT compiled program. -_ENTRY_NAME = "main" - -# Type aliases for type annotation. -_UnaryOp = Callable[[Any], Any] -_BinaryOp = Callable[[Any, Any], Any] -_ExprVisitor = Callable[..., None] -_ExprInfoDict = Dict["IndexExpr", "_ExprInfo"] -_LogicalOp = Callable[[bool, bool], bool] -_ModeFormatOp = Callable[["ModeFormat", "ModeFormat"], "ModeFormat"] -_SubtreeLeafChecker = Optional[Callable[..., bool]] - - -class Type(enum.Enum): - """The data types supported by TACO. - - We use numpy data types to implement the enum data types. 
- """ - - INT8 = np.int8 - INT16 = np.int16 - INT32 = np.int32 - INT64 = np.int64 - FLOAT16 = np.float16 - FLOAT32 = np.float32 - FLOAT64 = np.float64 - COMPLEX64 = np.complex64 - COMPLEX128 = np.complex128 - - -# All floating point type enums. -_FLOAT_TYPES = (Type.FLOAT16, Type.FLOAT32, Type.FLOAT64) -# All integral type enums. -_INT_TYPES = (Type.INT8, Type.INT16, Type.INT32, Type.INT64) -# All complex type enums. -_COMPLEX_TYPES = (Type.COMPLEX64, Type.COMPLEX128) -# Type alias for any numpy type used to implement the runtime support for the -# enum data types. -_AnyRuntimeType = Union[ - np.int8, - np.int16, - np.int32, - np.int64, - np.float16, - np.float32, - np.float64, - np.complex64, - np.complex128, -] - - -@dataclasses.dataclass(frozen=True) -class DType: - """The data type class. - - We support the TACO API dtype class with an alias of this class. - - The following methods are defined by the TACO API: - is_float: Returns whether the data type represents a floating point value. - is_int: Returns whether the data type represents an integral value. - - Attributes: - kind: A Type enum representing the data type. - value: The numpy data type for the TACO data type. - """ - - kind: Type = Type.FLOAT32 - - def is_float(self) -> bool: - """Returns whether the data type represents a floating point value.""" - return self.kind in _FLOAT_TYPES - - def is_int(self) -> bool: - """Returns whether the data type represents an integral value.""" - return self.kind in _INT_TYPES - - def is_complex(self) -> bool: - """Returns whether the data type represents a complex value.""" - return self.kind in _COMPLEX_TYPES - - @property - def value(self) -> _AnyRuntimeType: - """Returns the numpy dtype for the data type.""" - return self.kind.value - - -def _dtype_to_mlir_str(dtype: DType) -> str: - """Returns the MLIR string for the given dtype.""" - dtype_to_str = { - Type.INT16: "i8", - Type.INT16: "i16", - Type.INT32: "i32", - Type.INT64: "i64", - Type.FLOAT16: "f16", - Type.FLOAT32: "f32", - Type.FLOAT64: "f64", - Type.COMPLEX64: "complex", - Type.COMPLEX128: "complex", - } - return dtype_to_str[dtype.kind] - - -def _nptype_to_taco_type(ty: np.dtype) -> DType: - """Returns the TACO type for the given numpy type.""" - nptype_to_dtype = { - np.int8: Type.INT8, - np.int16: Type.INT16, - np.int32: Type.INT32, - np.int64: Type.INT64, - np.float16: Type.FLOAT16, - np.float32: Type.FLOAT32, - np.float64: Type.FLOAT64, - np.complex64: Type.COMPLEX64, - np.complex128: Type.COMPLEX128, - } - return DType(nptype_to_dtype[ty]) - - -def _mlir_type_from_taco_type(dtype: DType) -> ir.Type: - """Returns the MLIR type corresponding to the given TACO type.""" - dtype_to_irtype = { - Type.INT8: ir.IntegerType.get_signless(8), - Type.INT16: ir.IntegerType.get_signless(16), - Type.INT32: ir.IntegerType.get_signless(32), - Type.INT64: ir.IntegerType.get_signless(64), - Type.FLOAT16: ir.F16Type.get(), - Type.FLOAT32: ir.F32Type.get(), - Type.FLOAT64: ir.F64Type.get(), - Type.COMPLEX64: ir.ComplexType.get(ir.F32Type.get()), - Type.COMPLEX128: ir.ComplexType.get(ir.F64Type.get()), - } - return dtype_to_irtype[dtype.kind] - - -def _ctype_pointer_from_array(array: np.ndarray) -> ctypes.pointer: - """Returns the ctype pointer for the given numpy array.""" - return ctypes.pointer(ctypes.pointer(runtime.get_ranked_memref_descriptor(array))) - - -class ModeFormat(enum.Enum): - """The tensor dimension storage format class. - - We support the TACO API mode_format class with an alias of this class. 
- - In TACO, a tensor dimension is called a mode and the storage format for a - tensor dimension is called a mode format. - """ - - DENSE = sparse_tensor.DimLevelType.dense - COMPRESSED = sparse_tensor.DimLevelType.compressed - - -def _mode_format_operation(a: ModeFormat, b: ModeFormat, op: _LogicalOp) -> ModeFormat: - """Implements the given operator on ModeFormat.""" - return ( - ModeFormat.COMPRESSED - if op(a == ModeFormat.COMPRESSED, b == ModeFormat.COMPRESSED) - else ModeFormat.DENSE - ) - - -def _mode_format_estimator(op: _BinaryOp) -> _ModeFormatOp: - """Produces a ModeFormat operator for the given binary operator. - - The ModeFormat operator is used as a heuristic to derive the destination - dimension sparsity from the source dimension sparsity. In particular, if the - binary operator produces a disjunction of the zero values from its source - operands, such as the MUL operator, we return a ModeFormat operator that - uses operator.or_. That is, we estimate that a dimension for the MUL - operation result to be sparse if either of its source operands is sparse. - - On the other hand, if the binary operator produces a conjunction of the - zero values from its source operands, such as the ADD operator, we return - a ModeFormat operator that uses operator.and_. In this case, we estimate - that a dimension for the ADD operation result to be sparse if both of its - source operands are sparse. - - Args: - op: A _BinaryOp object representing a supporting operator on tensors. - - Returns: - A ModeFormatOp for estimating the destination dimension sparsity from - the source dimension sparsity. - """ - conjunction = functools.partial(_mode_format_operation, op=operator.and_) - disjunction = functools.partial(_mode_format_operation, op=operator.or_) - return conjunction if op(0, 1) != 0 else disjunction - - -def _all_instance_of(collection: Iterable, cls: Any) -> bool: - """Returns true if all elements of the iterable is an instance of cls.""" - return all(isinstance(e, cls) for e in collection) - - -def _identity_ordering(rank: int) -> List[int]: - """Returns the identity ordering for tensor of given rank.""" - return list(range(rank)) - - -@dataclasses.dataclass(frozen=True) -class ModeOrdering: - """The tensor dimension ordering class. - - We support the TACO API mode_ordering class with an alias of this class. - - Attributes: - ordering: A list of integers representing the ordering of the tensor - dimensions. - """ - - ordering: List[int] - - def __post_init__(self) -> None: - """Verifies the value in ordering. - - Raises: - ValueError: If ordering is not a list of integers. - """ - if not isinstance(self.ordering, list) or not _all_instance_of( - self.ordering, int - ): - raise ValueError("Ordering must be a list of integers: " f"{self.ordering}") - # Check that ordering is a permutation of the dimension numbers. - if sorted(self.ordering) != _identity_ordering(self.rank()): - raise ValueError( - f"Invalid ordering: {self.ordering} != " - f"permutation{_identity_ordering(self.rank())}." - ) - - def rank(self) -> int: - """Returns the number of dimensions represented by the ordering.""" - return len(self.ordering) - - -@dataclasses.dataclass(frozen=True) -class ModeFormatPack: - """The tensor dimension format class. - - We support the TACO API mode_format_pack class with an alias of this class. - - The storage format of a tensor contains one mode_format for each tensor - dimension. 
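# Illustrative sketch (hypothetical, not from the patch) of the sparsity
# heuristic implemented by _mode_format_estimator above: multiplication keeps a
# zero when either operand dimension is sparse, so it maps to a disjunction;
# addition needs both operands to be zero, so it maps to a conjunction.
mul_est = _mode_format_estimator(operator.mul)
add_est = _mode_format_estimator(operator.add)
assert mul_est(ModeFormat.COMPRESSED, ModeFormat.DENSE) == ModeFormat.COMPRESSED
assert add_est(ModeFormat.COMPRESSED, ModeFormat.DENSE) == ModeFormat.DENSE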
- - Attributes: - formats: A list of ModeFormat representing the storage format for each of - the tensor dimension. - """ - - formats: List[ModeFormat] - - def __post_init__(self) -> None: - """Verifies the value in formats. - - Raises: - ValueError: If formats is not a list of ModeFormats. - """ - if not isinstance(self.formats, list) or not _all_instance_of( - self.formats, ModeFormat - ): - raise ValueError("Formats must be a list of ModeFormat: " f"{self.formats}") - - def rank(self) -> int: - """Returns the number of dimensions represented by the format pack.""" - return len(self.formats) - - -@dataclasses.dataclass -class Format: - """The tensor format class defined by the TACO API. - - Attributes: - format_pack: A ModeFormatPack representing the storage format for the tensor - dimensions. - ordering: A ModeOrdering representing the tensor dimension ordering in the - storage. - """ - - format_pack: ModeFormatPack - ordering: Optional[ModeOrdering] = None - - def __post_init__(self) -> None: - """Verifies and fixes up the values in format_pack and ordering. - - Verifies and fixes up the values in format_pack and ordering to supports the - initializer syntax defined by the TACO API. If format_pack is a list of - ModeFormat, replaces it with ModeFormatPack constructed from the list. If - ordering is not provided, set ordering to the natural ordering for the rank - corresponding to format_pack. - - Raises: - ValueError: If format_pack is not an instance of ModeFormatPack or if - ordering is not an instance of ModeOrdering. - """ - if isinstance(self.format_pack, list): - if not _all_instance_of(self.format_pack, ModeFormat): - raise ValueError(f"Expected a list of ModeFormat: {self.format_pack}") - self.format_pack = ModeFormatPack(self.format_pack) - if not isinstance(self.format_pack, ModeFormatPack): - raise ValueError(f"Expected ModeFormatpack: {self.format_pack}") - - if self.ordering is None: - self.ordering = ModeOrdering(list(range(self.rank()))) - if isinstance(self.ordering, list): - if not _all_instance_of(self.ordering, int): - raise ValueError(f"Expected a list of integer: {self.ordering}") - self.ordering = ModeOrdering(self.ordering) - if not isinstance(self.ordering, ModeOrdering): - raise ValueError(f"Expected ModeOrdering: {self.ordering}") - - if self.format_pack.rank() != self.ordering.rank(): - raise ValueError( - "Inconsistent ModeFormatPack and ModeOrdering: " - f"len({self.format_pack}) != " - f"len({self.ordering})" - ) - - def rank(self) -> int: - """Returns the number of dimensions represented by the format.""" - return self.format_pack.rank() - - def get_permutation_and_sparsity(self) -> Tuple[np.ndarray, np.ndarray]: - """Constructs the numpy arrays for the permutation and sparsity.""" - perm = np.array(self.ordering.ordering, dtype=np.ulonglong) - a = [f.value for f in self.format_pack.formats] - sparse = np.array(a, dtype=np.uint8) - return (perm, sparse) - - def mlir_tensor_attr(self) -> Optional[sparse_tensor.EncodingAttr]: - """Constructs the MLIR attributes for the tensor format.""" - order = ( - range(self.rank()) if (self.ordering is None) else self.ordering.ordering - ) - mlir_storage_format = [f.value for f in self.format_pack.formats] - return sparse_tensor.EncodingAttr.get( - mlir_storage_format, - ir.AffineMap.get_permutation(order), - _POS_WIDTH, - _CRD_WIDTH, - ) - - -def _make_format( - formats: List[ModeFormat], ordering: Optional[List[int]] = None -) -> Format: - """Constructs a format from a list of ModeFormat and an optional ordering. 
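# Illustrative sketch (hypothetical, not from the patch): building a CSR-like
# two-dimensional Format with the helpers above; the identity dimension
# ordering is supplied by default.
csr = _make_format([ModeFormat.DENSE, ModeFormat.COMPRESSED])
assert csr.rank() == 2
assert csr.ordering.ordering == [0, 1]
# An explicit ordering permutes how the dimensions are laid out in storage.
csc_like = _make_format([ModeFormat.DENSE, ModeFormat.COMPRESSED], [1, 0])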
- - Args: - formats: A list of ModeFormat, one for each dimension of a tensor. - ordering: An optional list of integer, for the ordering of the tensor - dimensions. When an ordering is not given, the identity ordering is used. - - Returns: - A tensor format object. - - Raises: - ValueError: If formats is not a list of ModeFormat or the length of formats - is not consistent with the len of ordering. - """ - ordering = ordering or _identity_ordering(len(formats)) - return Format(ModeFormatPack(formats), ModeOrdering(ordering)) - - -class IndexExpr(abc.ABC): - """The index notation base class. - - We support the TACO API index_expression class with an alias of this class. - """ - - def _verify_operand_and_build_expr(self, rhs, op: _BinaryOp) -> "_BinaryExpr": - """Verifies the RHS operand and returns a binary expression. - - Args: - rhs: The RHS of the binary operation, which could be any Python object - from user inputs. - op: A _BinaryOp object representing the binary operator. - - Raises: - ValueError: If rhs is not an IndexExpr. - """ - if not isinstance(rhs, IndexExpr): - raise ValueError(f"Expected IndexExpr: {rhs}") - return _BinaryExpr(op, self, rhs) - - def _build_unary_expr(self, op: _UnaryOp) -> "_UnaryExpr": - """Build a unary expression. - - Args: - op: A _UnaryOp object representing the unary operation. - """ - return _UnaryExpr(op, self) - - def __add__(self, rhs) -> "_BinaryExpr": - """Defines the operator +. - - Args: - rhs: The value being added, which could be any Python object from user - inputs. - - Returns: - A _BinaryExpr object representing the operation. - - Raises: - ValueError: If rhs is not an IndexExpr. - """ - return self._verify_operand_and_build_expr(rhs, operator.add) - - def __mul__(self, rhs) -> "_BinaryExpr": - """Defines the operator *. - - Args: - rhs: The value being multiplied, which could be any Python object from - user inputs. - - Returns: - A _BinaryExpr object representing the operation. - - Raises: - ValueError: If rhs is not an IndexExpr. - """ - return self._verify_operand_and_build_expr(rhs, operator.mul) - - def __abs__(self) -> "_UnaryExpr": - """Defines the operator abs. - - Returns: - A _UnaryExpr object representing the operation. - """ - return self._build_unary_expr(operator.abs) - - def __neg__(self) -> "_UnaryExpr": - """Defines the operator neg. - - Returns: - A _UnaryExpr object representing the operation. - """ - return self._build_unary_expr(operator.neg) - - def __sub__(self, rhs) -> "_BinaryExpr": - """Defines the operator -. - - Args: - rhs: The value being subtracted, which could be any Python object from - user inputs. - - Returns: - A _BinaryExpr object representing the operation. - - Raises: - ValueError: If rhs is not an IndexExpr. - """ - return self._verify_operand_and_build_expr(rhs, operator.sub) - - @abc.abstractmethod - def _visit( - self, func: _ExprVisitor, args, *, leaf_checker: _SubtreeLeafChecker = None - ) -> None: - """A post-order visitor. - - Args: - func: A callable applied to each node in the expression tree. - args: The variable-length arguments passed to the callable. These - arguments are grouped as an iterable and will be unpacked before passing - to the callable. This is to enable the keyword argument only syntax - after this argument. - leaf_checker: A callable object to identify nodes that should be treated - as leaf nodes to support partial tree visiting. 
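# Illustrative sketch (hypothetical, not from the patch): the operator
# overloads above assemble an expression tree out of IndexExpr nodes. Tensor,
# Access, get_index_vars, _UnaryExpr and _BinaryExpr are defined later in this
# module.
i, j = get_index_vars(2)
A = Tensor([4, 4])
B = Tensor([4, 4])
expr = A[i, j] + (-B[i, j])    # _BinaryExpr(add, Access, _UnaryExpr(neg, Access))
assert isinstance(expr, _BinaryExpr)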
- """ - pass - - @abc.abstractmethod - def _emit_expression( - self, - expr_to_opnd: Dict["IndexExpr", lang.OperandDef], - expr_to_info: _ExprInfoDict, - ) -> lang.ScalarExpression: - """Emits MLIR for the expression tree. - - Args: - expr_to_opnd: A dictionary for looking up structured op input operands for - the input nodes of the structured op. - expr_to_info: A dictionary for looking up code generation information for - expressions. - - Returns: - A linalg dialect ScalarExpression for the expression. - """ - pass - - @abc.abstractmethod - def dtype(self) -> DType: - """Returns the data type for the result of the expression.""" - pass - - def _emit_structured_op(self, expr_to_info: _ExprInfoDict) -> None: - """Emits a structured op in the linalg dialect for the expression tree. - - We define a DefineOpcallable in the domain specific language for the linalg - dialect and execute the callable to generate the structured op. Self is the - root of the expression tree for the structured op. - - Args: - expr_to_info: A dictionary for looking up code generation information for - expressions. - """ - op_info = expr_to_info[self].structop_info - op_name = op_info.dst_name - op_def = lang.LinalgOpDef(name=op_name) - op_callable = lang.DefinedOpCallable(op_name, op_def) - - # Collect the input expression nodes for the structured op. - expr_inputs = [] - self._visit( - _gather_structured_op_input, - (self, expr_to_info, expr_inputs), - leaf_checker=_is_structured_op_leaf, - ) - - # Create a linalg structured op operand for each input expression node and - # build a dictionary for looking up the information. - expr_to_input_opnd = { - e: _emit_structured_op_input(e, expr_to_info, op_def) for e in expr_inputs - } - - # Emit the expression tree, which produces the value assigned to the - # destination tensor. - value = self._emit_expression(expr_to_input_opnd, expr_to_info) - # Emit the structured op representation for the destination tensor. - dst_opnd = _emit_operand( - op_def, - op_info.dst_indices, - op_info.dst_name, - lang.OperandKind.OUTPUT_TENSOR, - ) - dst_dim_syms = _mlir_dimensions_from_index_vars(op_info.dst_indices) - dst_use = lang.TensorUse(dst_opnd, dst_dim_syms) - - expr_info = expr_to_info[self] - # If the structured op reduces some indices, explicitly represent the - # reduction. This is done by generating a ReduceFn for the dimensions being - # reduced in the linalg dialect and calling the function with the value - # being reduced. We only support add reduction currently. - if expr_info.reduce_indices: - reduce_dims = _mlir_dimensions_from_index_vars(expr_info.reduce_indices) - value = lang.ReduceFn.add[reduce_dims](value) - - # Emit the assignment as a comprehension in the linalg dialect. - comp = lang.Comprehension((dst_use, value)) - op_def.comprehensions.append(comp) - - # The structured op in the linalg dialect requires an explicit - # initialization for the destination tensor. Emit MLIR to initialize the - # destination tensor. - init = op_info.emit_tensor_init() - - # Collect MLIR values for the linalg input operands, with the assumption - # that dictionary preserves the insertion order. - args = [ - expr_to_info[expr].mlir_value for expr, opnd in expr_to_input_opnd.items() - ] - # Execute the DefineOpcallable object for the linalg dialect operation to - # emit MLIR for the linalg structured op. 
- expr_info.mlir_value = op_callable(*args, outs=[init]) - - def _identify_structured_ops( - self, - expr_to_info: _ExprInfoDict, - dst: "Tensor", - dst_indices: Tuple["IndexVar", ...], - ) -> List["IndexExpr"]: - """Returns expression nodes for the roots of the identified structured ops. - - A structured op in the linalg dialect only supports reduction performed on - the whole expression. If the expression tree contains reduction that are - performed on part of the expression tree, the expression tree needs to be - implemented with multiple structured ops. This routine identifies all the - expression nodes that contain reduction as the root of structured ops in the - linalg dialect. - - Args: - expr_to_info: A dictionary for looking up code generation information for - expressions. - dst: A destination Tensor that accepts the value of the expression tree. - dst_indices: The indices used by the destination index expression. - - Returns: - An ordered list of IndexExpr for the root expressions of the structured - ops, where child expressions go before parent expressions that use their - results. - """ - reduce_indices = tuple(set(expr_to_info[self].src_indices) - set(dst_indices)) - for reduce_index in reduce_indices: - _mark_structured_op_root(self, reduce_index, expr_to_info) - - self._visit(_accumulate_reduce_indices, (expr_to_info,)) - structop_roots = [] - self._visit(_gather_structured_op, (expr_to_info, structop_roots)) - - # Handle the root of the top level expression. - if not structop_roots or structop_roots[-1] != self: - # The top level expression is not a reduction. Add the top level - # expression as a structured op root. - structop_roots.append(self) - - # Use user specified information for the destination tensor to build an - # _StructOpInfo for the top level expression. - expr_to_info[self].structop_info = _StructOpInfo( - dst_indices, tuple(dst.shape), dst.dtype, dst.name, dst.format - ) - - return structop_roots - - def _validate_and_collect_expr_info( - self, - dst: "Tensor", - dst_indices: Tuple["IndexVar", ...], - ) -> _ExprInfoDict: - """Propagates expression information for validation. - - Propagates the indices used by child expression nodes to parent expression - nodes. Also collects and validates the sizes for the dimensions - corresponding to the indices. - - Args: - dst: A destination Tensor that accepts the value of the expression tree. - dst_indices: The indices used by the destination index expression. - - Raises: - ValueError if there is any inconsistency in indices or dimensional - values. - - Returns: - A dictionary of (IndexExpr, _ExprInfo). - """ - expr_to_info = {} - # Validate the expression tree and construct expression information. - self._visit(_validate_and_collect_expr_info, (expr_to_info,)) - - # Validate the destination dimension information. 
- info = expr_to_info[self] - index_to_dim_info = {i: d for i, d in zip(info.src_indices, info.dim_infos)} - for ( - i, - d, - ) in zip(dst_indices, dst.shape): - if i not in index_to_dim_info: - raise ValueError( - "Destination IndexVar not used in the " f"source expression: {i}" - ) - else: - if d != index_to_dim_info[i].dim and index_to_dim_info[i].dim != -1: - raise ValueError( - f"Inconsistent destination dimension for {i}: " - f"{d} vs {index_to_dim_info[i].dim}" - ) - - return expr_to_info - - def _emit_assignment( - self, - module: ir.Module, - dst: "Tensor", - dst_indices: Tuple["IndexVar", ...], - expr_to_info: _ExprInfoDict, - input_accesses: List["Access"], - ) -> None: - """Emits an MLIR function for assigning the expression to a tensor.""" - input_types = [a.tensor.mlir_tensor_type() for a in input_accesses] - - # Build the kernel for the operations. - with ir.InsertionPoint(module.body): - - @func.FuncOp.from_py_func(*input_types, name=_ENTRY_NAME) - def linalg_funcop(*args): - # Set up the mapping from the Access nodes to their MLIR values. - for e, mlir in zip(input_accesses, args): - expr_to_info[e].mlir_value = mlir - - # Emit structured ops in the linalg dialect to implement the assignment. - for structop_root in self._identify_structured_ops( - expr_to_info, dst, dst_indices - ): - structop_root._emit_structured_op(expr_to_info) - dst._record_stats(expr_to_info[structop_root].structop_info) - - # The function returns the MLIR value of the root expression. - return expr_to_info[self].mlir_value - - linalg_funcop.func_op.attributes[ - "llvm.emit_c_interface" - ] = ir.UnitAttr.get() - - def get_input_accesses(self) -> List["Access"]: - """Compute the list of input accesses for the expression.""" - input_accesses = [] - self._visit(_gather_input_accesses_index_vars, (input_accesses,)) - return input_accesses - - def compile( - self, - dst: "Tensor", - dst_indices: Tuple["IndexVar", ...], - ) -> execution_engine.ExecutionEngine: - """Compiles the tensor assignment dst[dst_indices] = expression. - - Args: - dst: The destination tensor. - dst_indices: The tuple of IndexVar used to access the destination tensor. - - Returns: - The execution engine for the tensor assignment. - - Raises: - ValueError: If the expression is not proper or not supported. - """ - expr_to_info = self._validate_and_collect_expr_info(dst, dst_indices) - input_accesses = self.get_input_accesses() - - # Build and compile the module to produce the execution engine. - with ir.Context(), ir.Location.unknown(): - module = ir.Module.create() - self._emit_assignment( - module, dst, dst_indices, expr_to_info, input_accesses - ) - engine = utils.compile_and_build_engine(module) - - return engine - - -class _AtomicCounter: - """An atomic counter.""" - - def __init__(self): - self._counter = 0 - self._counter_lock = threading.Lock() - - def increment(self) -> int: - """Increments the counter by one and returns the old value.""" - old_value = self._counter - with self._counter_lock: - self._counter = self._counter + 1 - return old_value - - -class IndexVar(IndexExpr): - """The tensor index class. - - We support the TACO API index_var class with an alias of this class. - - An IndexVar object represents an index variable in tensor index notation. - - Attributes: - name: A unique string name of the IndexVar. 
- """ - - _counter = _AtomicCounter() - - def __init__(self): - id = self._counter.increment() - self._name = f"{_TACO_INDEX_PREFIX}{id}" - - def __repr__(self) -> str: - return f"IndexVar(name={repr(self._name)})" - - @property - def name(self) -> str: - """Returns the name of the IndexVar.""" - return self._name - - def _visit( - self, func: _ExprVisitor, args, *, leaf_checker: _SubtreeLeafChecker = None - ) -> None: - """A post-order visitor.""" - if leaf_checker: - assert leaf_checker(self, *args) - func(self, *args) - - def _emit_expression( - self, - expr_to_opnd: Dict[IndexExpr, lang.OperandDef], - expr_to_info: _ExprInfoDict, - ) -> lang.ScalarExpression: - """Emits a index value casted to the data type of the tensor expression.""" - dim = getattr(lang.D, self.name) - index = lang.index(dim) - int_value = lang.TypeFn.cast_unsigned(lang.TV.I64, index) - return lang.TypeFn.cast_unsigned(lang.T, int_value) - - def dtype(self) -> DType: - """Returns the data type for the index value. - - This is unreachable for IndexVar. - """ - assert 0 - - -def get_index_vars(n: int) -> List[IndexVar]: - """Returns a list of n IndexVar. - - This routine is defined by the TACO API. - - Args: - n: An integer representing the number of IndexVar to get. - - Returns: - A list of IndexVar. - - Raises: - ValueError: if n is not a positive integer. - """ - if not isinstance(n, int) or n <= 0: - raise ValueError(f"Expected an integer: {n}.") - # If lock contention ever becomes an issue, we could implement a bulk getter - # that returns a range by only claiming the lock once. - return [IndexVar() for i in range(n)] - - -def _mlir_symbols_from_index_vars( - index_vars: Tuple[IndexVar, ...] -) -> Tuple[lang.SymbolDef, ...]: - """Returns a tuple of MLIR symbols for the given tuple of index_var.""" - return tuple(getattr(lang.S, i.name) for i in index_vars) - - -def _mlir_dimensions_from_index_vars( - index_vars: Tuple[IndexVar, ...] -) -> Tuple[lang.DimDef, ...]: - """Returns a tuple of MLIR dimensions for the given tuple of index_var.""" - return tuple(getattr(lang.D, i.name) for i in index_vars) - - -def _mlir_tensor_type( - dtype: DType, shape: Tuple[int, ...], attr: Optional[sparse_tensor.EncodingAttr] -) -> ir.RankedTensorType: - """Returns an MLIR tensor type. - - Args: - dtype: An DType object for the element data type of the tensor. - shape: A tuple of integer for the shape of the tensor. - attr: An optional MLIR sparse tensor attribute, only provided if the tensor - is a sparse tensor. - - Returns: - An MLIR ranked tensor type. - """ - ir_type = _mlir_type_from_taco_type(dtype) - return ir.RankedTensorType.get(shape, ir_type, attr) - - -@dataclasses.dataclass(frozen=True) -class _StructOpInfo: - """Information for generating a structured op in the linalg dialect. - - This information is associated with an expression node that serves as the - root for an expression subtree implemented with a structured op. - - Attributes: - dst_indices: A tuple of IndexVar, representing the result dimensions of the - structured op. This is used to construct the temporary variable for the - tensor to hold the structured op result. - dst_dims: A tuple of int, representing the result shape of the structured - op. - dst_dtype: A DType representing the data type of the structured op result. - dst_name: A string representing the name of the structured op result. - dst_format: An optional Format object representing the destination tensor - format. None represents a true dense tensor. - """ - - dst_indices: Tuple[IndexVar, ...] 
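# Illustrative sketch (hypothetical, not from the patch) of the index-variable
# helpers above: each IndexVar receives a unique, atomically counted name with
# the TACO "i" prefix, and get_index_vars validates its argument.
i, j, k = get_index_vars(3)
assert i.name != j.name and i.name.startswith(_TACO_INDEX_PREFIX)
try:
    get_index_vars(0)
except ValueError:
    pass  # n must be a positive integer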
- dst_dims: Tuple[int, ...] - dst_dtype: DType - dst_name: str - dst_format: Optional[Format] - - def __post_init__(self) -> None: - """Verifies the integrity of the attribute values.""" - assert len(self.dst_indices) == len(self.dst_dims) - - def emit_tensor_init(self) -> ir.RankedTensorType: - """Returns an initialization for the destination tensor.""" - if self.dst_format is None or self.dst_format.rank() == 0: - # Initialize the dense tensor. - ir_type = _mlir_type_from_taco_type(self.dst_dtype) - empty = tensor.EmptyOp(self.dst_dims, ir_type).result - zero = arith.ConstantOp(ir_type, 0.0) - return linalg.fill(zero, outs=[empty]) - - # Initialize the sparse tensor. - mlir_type = _mlir_tensor_type( - self.dst_dtype, self.dst_dims, self.dst_format.mlir_tensor_attr() - ) - index_type = ir.IndexType.get() - return bufferization.AllocTensorOp(mlir_type, [], None, None, None) - - -class _Stats: - """Information to describe how a tensor expression is implemented. - - Currently, we only record the temporary tensors introduced for splitting the - original expression. - """ - - def __init__(self): - self._temps = [] - - def __repr__(self) -> str: - return f"_Stats({repr(self._temps)})" - - def add_element(self, structop: _StructOpInfo): - """Adds a temporary tensor.""" - self._temps.append(structop) - - def get_total(self) -> int: - """Gets the total number of temporary tensors.""" - return len(self._temps) - - def _get_element(self, idx: int) -> _StructOpInfo: - """Gets the ith temporary tensor.""" - assert idx < self.get_total() - return self._temps[idx] - - def get_dimensions(self, idx: int) -> Tuple[int]: - """Gets the dimensions for the ith temporary tensor.""" - return self._get_element(idx).dst_dims - - def get_formats(self, idx: int) -> Tuple[ModeFormat]: - """Gets the ModeFormats for the ith temporary tensor.""" - return tuple(self._get_element(idx).dst_format.format_pack.formats) - - -class _SparseValueInfo(enum.Enum): - """Describes how a sparse tensor value is stored. - _UNPACKED: The sparse tensor value is stored as (coordnates, values) in - Python. - _PACKED: The sparse tensor value is stored as a C pointer to a packed MLIR - sparse tensor. - """ - - _UNPACKED = 0 - _PACKED = 1 - - -@dataclasses.dataclass(frozen=True) -class _Assignment: - """Records an assignment to a tensor T as T[indices] = expression.""" - - indices: Tuple["IndexVar", ...] - expression: "IndexExpr" - - -class Tensor: - """The tensor class. - - We support the TACO API tensor class with an alias of this class. - - This class is part of the TACO API with the following methods: - insert: Inserts a value to the given coordinate in the tensor. - to_array: Returns a numpy ndarray for the tensor. - - TACO API also defines the following arrtibutes for the class: - dtype: A dtype object representing the data type of the tensor. - format: A format object representing the storage format of the tensor. - name: A string object representing the name of the tensor. - order: An integral rank of the tensor. - shape: A list of integers representing the shape of the tensor. - - We currently ignore the tensor dimension ordering for dense tensor. - """ - - _counter = _AtomicCounter() - - def _get_unique_name(self) -> str: - """Returns a unique name for creating a new Tensor.""" - return f"{_TACO_TENSOR_PREFIX}{self._counter.increment()}" - - def _init_format(self, fmt: Union[ModeFormat, List[ModeFormat], Format]) -> None: - """Process the fmt argument for the Tensor constructor. 
- - Args: - fmt: This argument can be a ModeFormat, List[ModeFormat], or format. If - this argument is a ModeFormat, uses this ModeFormat for all the tensor - dimensions. If this argument is a list of ModeFormat, the len of the - list should equal to the rank of the tensor. If this argument is a - format, uses it for the format of the tensor. - - Raises: - ValueError: If fmt is not one of the expected type or is inconsistent - with the rank of the tensor. This is because fmt could be an users - input. - """ - if isinstance(fmt, ModeFormat): - self._format = _make_format([fmt] * self.order) - elif isinstance(fmt, list): - if len(fmt) == self.order and isinstance(fmt[0], ModeFormat): - self._format = _make_format(fmt) - else: - raise ValueError( - "Inconsistent shape and format: " f"{self._shape}, {fmt}." - ) - elif isinstance(fmt, Format): - if fmt.rank() != self.order: - raise ValueError( - "Inconsistent shape and format: " f"{self._shape}, {fmt}." - ) - else: - self._format = fmt - else: - raise ValueError(f"Invalid format argument: {fmt}.") - - def __init__( - self, - value_or_shape: Optional[ - Union[List[int], Tuple[int, ...], complex, float, int] - ] = None, - fmt: Optional[Union[ModeFormat, List[ModeFormat], Format]] = None, - dtype: Optional[DType] = None, - name: Optional[str] = None, - is_dense: bool = False, - ): - """The tensor constructor interface defined by TACO API. - - Args: - value_or_shape: This argument is optional and can be int, float, - List[int], or Tuple[int, ...]. If this argument is an int or float, - creates a scalar tensor and initializes it with the value. If this - argument is a list or tuple of int, uses it as the shape to create a - tensor. - fmt: This argument can be a ModeFormat, List[ModeFormat], or format. If - this argument is a ModeFormat, uses this ModeFormat for all the tensor - dimensions. If this argument is a list of ModeFormat, the len of the - list should equal to the rank of the tensor. If this argument is a - format, uses it for the format of the tensor. - dtype: An object of dtype, representing the data type of the tensor. - name: A string name of the tensor. If a name is not given, creates a - unique name for the tensor. - is_dense: A boolean variable to indicate whether the tensor is a dense - tensor without any sparsity annotation. - - Raises: - ValueError: If there is any inconsistency among the input arguments. - """ - # Take care of the argument default values common to both sparse tensors - # and dense tensors. - dtype = dtype or DType(Type.FLOAT32) - self._name = name or self._get_unique_name() - self._assignment = None - self._engine = None - self._sparse_value_location = _SparseValueInfo._UNPACKED - self._dense_storage = None - self._dtype = dtype - - if is_dense: - assert fmt is None - assert ( - isinstance(value_or_shape, tuple) or isinstance(value_or_shape, list) - ) and _all_instance_of(value_or_shape, int) - self._shape = value_or_shape - self._format = None - return - - fmt = fmt or ModeFormat.COMPRESSED - # We currently use _coords and _values to host the sparse tensor value with - # COO format, and _dense_storage to host the dense tensor value. We don't - # support the conversion between the two storages. - self._coords = [] - self._values = [] - self._stats = _Stats() - if ( - value_or_shape is None - or isinstance(value_or_shape, int) - or isinstance(value_or_shape, float) - or isinstance(value_or_shape, complex) - ): - # Create a scalar tensor and ignore the fmt parameter. 
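# Illustrative sketch (hypothetical, not from the patch) of the constructor
# forms documented above: a scalar tensor from a value, a sparse tensor from a
# shape plus mode formats, and a true dense tensor via the is_dense extension.
s = Tensor(3.0)                                                 # scalar
c = Tensor([8, 8], [ModeFormat.DENSE, ModeFormat.COMPRESSED])   # CSR-like
d = Tensor([2, 3], dtype=DType(Type.FLOAT64), is_dense=True)    # plain dense
assert s.order == 0 and c.order == 2 and d.is_dense()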
- self._shape = [] - self._format = _make_format([], []) - if value_or_shape is not None: - self._dense_storage = np.array(value_or_shape, dtype=self._dtype.value) - elif ( - isinstance(value_or_shape, tuple) or isinstance(value_or_shape, list) - ) and _all_instance_of(value_or_shape, int): - # Create a tensor with the specified shape and format. - self._shape = list(value_or_shape) - self._init_format(fmt) - else: - raise ValueError( - "Invalid first argument. " - "Must be a tuple or list for a shape or a single value" - f"if initializing a scalar tensor: {value_or_shape}." - ) - - def _set_packed_sparse_tensor(self, pointer: ctypes.c_void_p) -> None: - """Records the MLIR sparse tensor pointer.""" - self._sparse_value_location = _SparseValueInfo._PACKED - self._packed_sparse_value = pointer - - def is_unpacked(self) -> bool: - """Returns true if the tensor value is not packed as MLIR sparse tensor.""" - return self._sparse_value_location == _SparseValueInfo._UNPACKED - - def unpack(self) -> None: - """Unpacks the MLIR sparse tensor representation.""" - if self.is_dense() or self.is_unpacked(): - return - - # Use the output MLIR sparse tensor pointer to retrieve the COO-flavored - # values and verify the values. - rank, nse, shape, values, indices = utils.sparse_tensor_to_coo_tensor( - self._packed_sparse_value, self._dtype.value - ) - assert rank == self.order - assert np.array_equal(self.shape, shape) - assert nse == len(values) - self._coords = indices - self._values = values - self._sparse_value_location = _SparseValueInfo._UNPACKED - - def __repr__(self) -> str: - self._sync_value() - self.unpack() - value_str = ( - f"{repr(self._dense_storage)})" - if self.is_dense() - else f"{repr(self._coords)} {repr(self._values)})" - ) - return ( - f"Tensor(_name={repr(self._name)} " f"_dtype={repr(self._dtype)} : " - ) + value_str - - def insert(self, coords: List[int], val: Union[complex, float, int]) -> None: - """Inserts a value to the given coordinate. - - Args: - coords: A list of integer coordinates. The length of the list must be the - same as the rank of the tensor. - val: A value being inserted. It is either an integral or a floating point - value. This value will be converted to the data type of the tensor. - - Raises: - ValueError: When there is any problem in the parameters. - """ - if self.is_dense(): - raise ValueError("Insert method is not supported for dense tensors.") - if self._assignment != None or not self.is_unpacked(): - raise ValueError( - "Can't use Insert method for a tensor constructed from a file." - ) - if not isinstance(coords, list): - raise ValueError(f"Non list coordinate detected: {coords}.") - if not _all_instance_of(coords, int): - raise ValueError(f"Non integer coordinate detected: {coords}.") - if len(coords) != self.order or any( - [c < 0 or c >= self._shape[i] for i, c in enumerate(coords)] - ): - raise ValueError("Invalid coordinate for rank: " f"{self.order}, {coords}.") - - if ( - not isinstance(val, int) - and not isinstance(val, float) - and not isinstance(val, complex) - ): - raise ValueError(f"Value is neither int nor float: {val}.") - - self._coords.append(tuple(coords)) - self._values.append(self._dtype.value(val)) - - def is_dense(self) -> bool: - """Returns true if the tensor doesn't have sparsity annotation.""" - return self.order == 0 or self._format is None - - def to_array(self) -> np.ndarray: - """Returns the numpy array for the Tensor. - - This is currenly only implemented for dense Tensor. 
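# Illustrative sketch (hypothetical, not from the patch): populating a sparse
# tensor with insert(); the coordinates and values stay in COO form until the
# value is packed by the MLIR runtime, and dense tensors reject insert().
t = Tensor([4, 4], ModeFormat.COMPRESSED)
t.insert([0, 1], 2.0)
t.insert([3, 2], 5.0)
assert t.is_unpacked() and not t.is_dense()
d = Tensor([2, 2], is_dense=True)
try:
    d.insert([0, 0], 1.0)
except ValueError:
    pass  # insert is only supported for sparse tensors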
- """ - if not self.is_dense(): - raise ValueError( - "Conversion from non-dense Tensor " "to numpy array not supported yet." - ) - - self._sync_value() - - return self._dense_storage - - @staticmethod - def from_array(array: np.ndarray) -> "Tensor": - """Returns a dense tensor with the value copied from the input array. - - We currently only support the conversion of float32 and float64 numpy arrays - to Tensor. - - Args: - array: The numpy array that provides the data type, shape and value for - the tensor. - - Returns: - A Tensor object. - - Raises: - ValueError if the data type of the numpy array is not supported. - """ - if array.dtype != np.float32 and array.dtype != np.float64: - raise ValueError(f"Expected floating point value type: {array.dtype}.") - t = Tensor( - array.shape, dtype=_nptype_to_taco_type(array.dtype.type), is_dense=True - ) - t._dense_storage = np.copy(array) - return t - - @staticmethod - def from_coo( - coordinates: List[Tuple[int, ...]], - values: List[_AnyRuntimeType], - fmt: Format, - dtype: DType, - ) -> "Tensor": - """Converts coordinates and values to a sparse tensor representation. - - Args: - coordinates: A list of coordinates with non-zero values. - values: The non-zero values. - fmt: The tensor storage format. - dtype: The tensor element data type. - - Returns: - A tensor with the given non-zero values and storage format. The shape of - the tensor has the minimum size for each dimension to make the given - coordinates valid. - """ - assert isinstance(coordinates, List) and _all_instance_of(coordinates, Tuple) - assert isinstance(values, List) and _all_instance_of(values, dtype.value) - assert isinstance(fmt, Format) - - rank = fmt.rank() - assert all(len(c) == rank and _all_instance_of(c, int) for c in coordinates) - - # Find the maximum coordinate value for each dimension. - max_coordinate = list(map(max, zip(*coordinates))) - # The size of each dimension is one more that such a maximum coordinate - # value. - shape = [c + 1 for c in max_coordinate] - t = Tensor(shape, fmt, dtype=dtype) - t._coords = coordinates - t._values = values - - return tensor - - @staticmethod - def from_file( - filename: str, - fmt: Format, - dtype: DType, - ) -> "Tensor": - """Constructs a sparse tensor using the COO-flavored values from a file. - - Args: - filename: A string for the name of the file that contains the sparse - tensor data. - fmt: The tensor storage format. - dtype: The tensor element data type. - - Returns: - A tensor with the given non-zero values and storage format. The tensor - value is stored as an MLIR sparse tensor. - """ - sparse_tensor, shape = utils.create_sparse_tensor( - filename, fmt.format_pack.formats, _dtype_to_mlir_str(dtype) - ) - t = Tensor(shape.tolist(), fmt, dtype=dtype) - t._set_packed_sparse_tensor(sparse_tensor) - - return t - - def to_file(self, filename: str) -> None: - """Output the tensor value to a file. - - This method evaluates any pending assignment to the tensor and outputs the - tensor value. - - Args: - filename: A string file name. - - Raises: - ValueError: If the tensor is dense, or an unpacked sparse tensor. - """ - self._sync_value() - - if self.is_dense(): - raise ValueError( - "Writing dense tensors without sparsity annotation to " - "file is not supported." - ) - - if self.is_unpacked(): - raise ValueError( - "Writing unpacked sparse tensors to file is not " "supported." 
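# Illustrative sketch (hypothetical, not from the patch) of the factory methods
# above: from_array copies a float32/float64 numpy array into a true dense
# tensor; other element types are rejected.
a = Tensor.from_array(np.arange(6, dtype=np.float64).reshape(2, 3))
assert a.is_dense() and a.shape == (2, 3) and a.dtype.kind == Type.FLOAT64
try:
    Tensor.from_array(np.zeros((2, 2), dtype=np.int32))
except ValueError:
    pass  # only floating-point arrays are accepted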
- ) - - utils.output_sparse_tensor( - self._packed_sparse_value, - filename, - self._format.format_pack.formats, - _dtype_to_mlir_str(self._dtype), - ) - - @property - def dtype(self) -> DType: - """Returns the data type for the Tensor.""" - return self._dtype - - @property - def format(self) -> Format: - """Returns the storage format for the Tensor.""" - return self._format - - @property - def name(self) -> str: - """Returns the name for the Tensor.""" - return self._name - - @property - def order(self) -> int: - """Returns the rank of the Tensor.""" - return len(self._shape) - - @property - def shape(self) -> List[int]: - """Returns the shape of the Tensor.""" - return self._shape - - def _verify_and_normalize_indices(self, indices) -> Tuple[IndexVar, ...]: - """Verifies and normalizes the indices to access the tensor. - - Args: - indices: The index expression used to access a tensor, which could be any - Python object from user inputs. - - Returns: - A tuple of IndexVar. - - Raises: - ValueError: If indices is not 0 for scalar tensors, or not an IndexVar or - a tuple of IndexVar for other tensors. - """ - if self.order == 0: - if not isinstance(indices, int) or indices != 0: - raise ValueError(f"Expected 0 to index scalar tensors: {indices}") - return () - - if isinstance(indices, IndexVar): - return (indices,) - elif isinstance(indices, tuple) and _all_instance_of(indices, IndexVar): - return indices - - raise ValueError(f"Expected IndexVars: {indices}") - - def __getitem__(self, key) -> "Access": - """Verifies and processes a tensor access. - - In the tensor index notation, a tensor access T[i, j] is represented as - retrieving a value with key (i, j) from the tensor object T in Python. This - routine verifies the key for the tensor access and returns a tensor access - object. - - Args: - key: The key used to access the tensor, which could be any Python object - from user inputs. - - Returns: - The corresponding tensor access object. - - Raises: - ValueError: If key is not an IndexVar or a tuple of IndexVar. - """ - indices = self._verify_and_normalize_indices(key) - return Access(self, indices) - - def __setitem__(self, key, value) -> None: - """Verifies and processes a tensor assignment. - - In the tensor index notation, a tensor assignment "T[i, j] = ..." is - represented as setting a value for a tensor object T via key (i, j) in - Python. This routine verifies the key, evaluates the value, and assigns the - value to the tensor. - - We only support assignment of dense tensor currently. - - Args: - key: The key used to access the tensor, which could be any Python object - from user inputs. - value: The value assigned to the tensor, which could be any Python object - from user inputs. - - Raises: - ValueError: If tensor is not a dense tensor, or the key is not an IndexVar - or a tuple of IndexVar, or the length of the indices is not the same as - the rank of the tensor. - """ - indices = self._verify_and_normalize_indices(key) - if len(indices) != self.order: - raise ValueError( - "Mismatch between indices and tensor rank: " - f"len({indices}) != {self.order}." - ) - - self._assignment = _Assignment(indices, value) - self._engine = None - - def compile(self, force_recompile: bool = False) -> None: - """Compiles the tensor assignment to an execution engine. - - Calling compile the second time does not do anything unless - force_recompile is True. - - Args: - force_recompile: A boolean value to enable recompilation, such as for the - purpose of timing. 
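# Illustrative sketch (hypothetical, not from the patch) of the index-notation
# assignment handled by __getitem__/__setitem__ above: the assignment is only
# recorded here; nothing is compiled or computed yet.
i, j = get_index_vars(2)
A = Tensor.from_array(np.ones((2, 3), dtype=np.float64))
B = Tensor.from_array(np.full((2, 3), 2.0))
C = Tensor([2, 3], dtype=DType(Type.FLOAT64), is_dense=True)
C[i, j] = A[i, j] + B[i, j]       # records an _Assignment on C
assert C._assignment is not None  # evaluation is deferred to compile/compute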
- - Raises: - ValueError: If the assignment is not proper or not supported. - """ - if self._assignment is None or ( - self._engine is not None and not force_recompile - ): - return - - self._engine = self._assignment.expression.compile( - self, self._assignment.indices - ) - - def compute(self) -> None: - """Executes the engine for the tensor assignment. - - Raises: - ValueError: If the assignment hasn't been compiled yet. - """ - if self._assignment is None: - return - - if self._engine is None: - raise ValueError("Need to invoke compile() before invoking compute().") - - input_accesses = self._assignment.expression.get_input_accesses() - # Gather the pointers for the input buffers. - input_pointers = [a.tensor.ctype_pointer() for a in input_accesses] - if self.is_dense(): - # The pointer to receive dense output is the first argument to the - # execution engine. - arg_pointers = [self.dense_dst_ctype_pointer()] + input_pointers - else: - # The pointer to receive the sparse tensor output is the last argument - # to the execution engine and is a pointer to pointer of char. - arg_pointers = input_pointers + [ - ctypes.pointer(ctypes.pointer(ctypes.c_char(0))) - ] - - # Invoke the execution engine to run the module. - self._engine.invoke(_ENTRY_NAME, *arg_pointers) - - # Retrieve the result. - if self.is_dense(): - result = runtime.ranked_memref_to_numpy(arg_pointers[0][0]) - assert isinstance(result, np.ndarray) - self._dense_storage = result - else: - self._set_packed_sparse_tensor(arg_pointers[-1][0]) - - self._assignment = None - self._engine = None - - def evaluate(self) -> None: - """Evaluates the tensor assignment.""" - self.compile() - self.compute() - - def _sync_value(self) -> None: - """Updates the tensor value by evaluating the pending assignment.""" - if self._assignment is not None: - self.evaluate() - - def mlir_tensor_type(self) -> ir.RankedTensorType: - """Returns the MLIR type for the tensor.""" - mlir_attr = ( - None - if (self._format is None or self.order == 0) - else self._format.mlir_tensor_attr() - ) - return _mlir_tensor_type(self._dtype, tuple(self._shape), mlir_attr) - - def dense_dst_ctype_pointer(self) -> ctypes.pointer: - """Returns the ctypes pointer for the pointer to an MemRefDescriptor. - - For a dense tensor output, the MLIR compiler allocates the storage for - the tensor. This routine returns the pointer to an MLIR MemRefDescriptor for - receiving the tensor. - """ - assert self.is_dense() - mem_ref_desc = runtime.make_nd_memref_descriptor( - self.order, np.ctypeslib.as_ctypes_type(self.dtype.value) - )() - return ctypes.pointer(ctypes.pointer(mem_ref_desc)) - - def ctype_pointer(self) -> ctypes.pointer: - """Returns the ctypes pointer for the pointer to the input tensor.""" - if self.is_dense(): - if self._dense_storage is None: - self._dense_storage = np.zeros(self._shape, self._dtype.value) - return _ctype_pointer_from_array(self._dense_storage) - - if self.is_unpacked(): - shape = np.array(self._shape, np.int64) - indices = np.array(self._coords, np.int64) - values = np.array(self._values, self._dtype.value) - perm, sparse = self.format.get_permutation_and_sparsity() - ptr = utils.coo_tensor_to_sparse_tensor( - shape, values, indices, perm, sparse - ) - else: - ptr = self._packed_sparse_value - - return ctypes.pointer(ctypes.cast(ptr, ctypes.c_void_p)) - - def get_scalar_value(self) -> _AnyRuntimeType: - """Returns the value for the scalar tensor. - - This method also evaluates the assignment to the tensor. 
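# Illustrative sketch (hypothetical, not from the patch), continuing the A, B,
# C, i, j names from the earlier sketch: the explicit compile/compute pipeline
# above. Running it requires the MLIR Python bindings and JIT runtime;
# evaluate() is shorthand for compile() followed by compute(), and to_array()
# triggers the same evaluation lazily via _sync_value().
C[i, j] = A[i, j] + B[i, j]
C.compile()            # builds and JIT-compiles the linalg module
C.compute()            # runs the engine and stores the dense result
print(C.to_array())    # already evaluated; returns the numpy result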
- - Raises: - ValueError: If the tensor is not a scalar. - """ - if self.order != 0: - raise ValueError(f"Expected a scalar tensor, got: rank={self.order}") - - self._sync_value() - return self._dense_storage - - def get_coordinates_and_values( - self, - ) -> Tuple[List[Tuple[int, ...]], List[_AnyRuntimeType]]: - """Returns the coordinates and values for the non-zero elements. - - This method also evaluates the assignment to the tensor and unpack the - sparse tensor. - """ - self._sync_value() - - if not self.is_dense(): - self.unpack() - return (self._coords, self._values) - - if self.order == 0: - return ([], self._dense_storage) - - # Coordinates for non-zero elements, grouped by dimensions. - coords_by_dims = self._dense_storage.nonzero() - # Coordinates for non-zero elements, grouped by elements. - coords = np.transpose(coords_by_dims) - values = self._dense_storage[coords_by_dims] - return (coords, values) - - def _record_stats(self, structop: "_StructOpInfo"): - """Collects information for temporary tensors.""" - # Exclude user specified destination tensors. - if structop.dst_name == self.name: - return - - self._stats.add_element(structop) - - -def _emit_operand( - op_def: lang.LinalgOpDef, - indices: Tuple[IndexVar, ...], - name: str, - kind: lang.OperandKind, -) -> lang.OperandDef: - """Emits an operand for a tensor access in the current linalg operation. - - Args: - op_def: A LinalgOpDef representing the current linalg dialect operation. - indices: A tuple of IndexVar used to access the tensor. - name: A unique string name of the tensor. - kind: An OperandKind for the operand. - - Returns: - An OperandDef representing the operand. - """ - dim_sym = _mlir_symbols_from_index_vars(indices) - opnd = lang.OperandDef(kind, lang.T, dim_sym) - op_def.add_operand(name, opnd) - return opnd - - -@dataclasses.dataclass(frozen=True) -class _DimInfo: - """Information for an operand dimension. - - Attributes: - dim: An integer for the size of the dimension. - mode_format: A ModeFormat for the dimension sparsity. - """ - - dim: int - mode_format: ModeFormat - - -def _get_dummy_dim_info() -> _DimInfo: - """Constructs the _DimInfo for an index used in tensor expressions.""" - return _DimInfo(-1, ModeFormat.DENSE) - - -@dataclasses.dataclass() -class _ExprInfo: - """Expression information for validation and code generation. - - Attributes: - src_indices: A tuple of IndexVar for the indices used by the tensors in the - expression tree. - dim_infos: A tuple of _DimInfo, representing the dimension information - corresponding to the src_indices. - reduce_indices: A set of IndexVar for the indices reduced by the expression. - acc_reduce_indices: An accumulated set of IndexVar for the indices reduced - by the expression and its children. - structop_info: Information to support the code generation for a structured - op in the linalg dialect, if the corresponding expression node is the root - of a subtree for a structured op. - mlir_value: The MLIR value generated for the structured op. - """ - - src_indices: Tuple[IndexVar, ...] - dim_infos: Tuple[_DimInfo, ...] - reduce_indices: Optional[Set[IndexVar]] = None - acc_reduce_indices: Optional[Set[IndexVar]] = None - structop_info: Optional[_StructOpInfo] = None - mlir_value: Optional[ir.Value] = None - - def __post_init__(self) -> None: - """Verifies and fix up attribute values. - - Verifies the consistency of the attributes and modifies the default values - to support convenient initializer syntax. 
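# Illustrative sketch (hypothetical, not from the patch): for a dense tensor
# with no pending assignment, get_coordinates_and_values() above derives the
# nonzero coordinates and their values directly from the numpy storage.
d = Tensor.from_array(np.array([[0.0, 1.5], [2.5, 0.0]]))
coords, values = d.get_coordinates_and_values()
# coords -> [[0, 1], [1, 0]], values -> [1.5, 2.5]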
- """ - assert len(self.src_indices) == len(self.dim_infos) - self.reduce_indices = self.reduce_indices or set() - self.acc_reduce_indices = self.acc_reduce_indices or set() - - -@dataclasses.dataclass(frozen=True) -class Access(IndexExpr): - """The tensor access class. - - We support the TACO API access class with an alias of this class. - - Attributes: - tensor: A Tensor being accessed. - indices: A tuple of IndexVar, representing the indices used to access the - Tensor. - """ - - tensor: Tensor - indices: Tuple[IndexVar, ...] - - def __post_init__(self) -> None: - """Verifies the tensor and indices for a tensor access. - - Raises: - ValueError: If indices is not a list of IndexVar or the len of indices - doesn't equal to the rank of the tensor. - """ - if not isinstance(self.indices, tuple) or not _all_instance_of( - self.indices, IndexVar - ): - raise ValueError(f"Indices contain non IndexVar: {str(self.indices)}.") - if self.tensor.order != len(self.indices): - raise ValueError( - "Invalid indices for rank: " - f"str{self.tensor.order} != len({str(self.indices)})." - ) - - def __repr__(self) -> str: - # The Tensor __repr__ method evaluates the pending assignment to the tensor. - # We want to define the __repr__ method here to avoid such evaluation of the - # tensor assignment. - indices_str = ", ".join(map(lambda i: i.name, self.indices)) - return f"Tensor({self.tensor.name}) " f"Indices({indices_str})" - - def _emit_expression( - self, - expr_to_opnd: Dict[IndexExpr, lang.OperandDef], - expr_to_info: _ExprInfoDict, - ) -> lang.ScalarExpression: - """Emits a linalg dialect TensorUse expression for the tensor access.""" - assert self in expr_to_opnd - dims = _mlir_dimensions_from_index_vars(self.indices) - return lang.TensorUse(expr_to_opnd[self], dims) - - def _visit( - self, func: _ExprVisitor, args, *, leaf_checker: _SubtreeLeafChecker = None - ) -> None: - if leaf_checker: - assert leaf_checker(self, *args) - func(self, *args) - - def dtype(self) -> DType: - return self.tensor.dtype - - -def _gather_input_accesses_index_vars( - expr: IndexExpr, - input_accesses: List[Access], -) -> None: - """Collects Access nodes.""" - if isinstance(expr, Access) and expr not in input_accesses: - input_accesses.append(expr) - - -def _op_ceil(__a: Any) -> Any: - """A _UnaryOp object for operation ceil.""" - pass - - -def _op_floor(__a: Any) -> Any: - """A _UnaryOp object for operation floor.""" - pass - - -def _op_unary_to_callable(op: _UnaryOp) -> lang.UnaryFnType: - """Returns the linalg dialect function object for the given operation.""" - op_to_callable = { - operator.abs: lang.UnaryFn.abs, - operator.neg: lang.UnaryFn.negf, - _op_ceil: lang.UnaryFn.ceil, - _op_floor: lang.UnaryFn.floor, - } - return op_to_callable[op] - - -@dataclasses.dataclass(frozen=True) -class _UnaryExpr(IndexExpr): - """The representation for a Unary operation. - - Attributes: - op: A _UnaryOp representing the operation. - a: An IndexExpr representing the operand for the operation. - """ - - op: _BinaryOp - a: IndexExpr - - def __post_init__(self) -> None: - """Verifies that the operand being added is an IndexExpr.""" - assert isinstance(self.a, IndexExpr) - - def _emit_expression( - self, - expr_to_opnd: Dict[IndexExpr, lang.OperandDef], - expr_to_info: _ExprInfoDict, - ) -> lang.ScalarExpression: - """Emits the expression tree and returns the expression.""" - # The current expression node is an internal node of the structured op. 
- if self not in expr_to_opnd: - a = self.a._emit_expression(expr_to_opnd, expr_to_info) - return _op_unary_to_callable(self.op)(a) - - # The current expression is a leaf node of the structured op. That is, it is - # a temporary tensor generated by its child structured op. - op_info = expr_to_info[self].structop_info - assert op_info is not None - dims = _mlir_dimensions_from_index_vars(op_info.dst_indices) - return lang.TensorUse(expr_to_opnd[self], dims) - - def _visit( - self, func: _ExprVisitor, args, *, leaf_checker: _SubtreeLeafChecker = None - ) -> None: - """A post-order visitor.""" - if leaf_checker is None or not leaf_checker(self, *args): - self.a._visit(func, args, leaf_checker=leaf_checker) - func(self, *args) - - def dtype(self) -> DType: - """Returns the data type of the operation.""" - return self.a.dtype() - - -def _op_to_callable(op: _BinaryOp) -> lang.BinaryFnType: - """Returns the linalg dialect function object for the given operation.""" - op_to_callable = { - operator.add: lang.BinaryFn.add, - operator.sub: lang.BinaryFn.sub, - operator.mul: lang.BinaryFn.mul, - } - return op_to_callable[op] - - -@dataclasses.dataclass(frozen=True) -class _BinaryExpr(IndexExpr): - """The representation for a binary operation. - - Attributes: - op: A _BinaryOp representing the binary operation. - a: An IndexExpr representing the first operand of the operation. - b: An IndexExpr representing the second operand of the operation. - """ - - op: _BinaryOp - a: IndexExpr - b: IndexExpr - - def __post_init__(self) -> None: - """Verifies that the operands being added are IndexExpr.""" - assert isinstance(self.a, IndexExpr) and isinstance(self.b, IndexExpr) - - def _emit_expression( - self, - expr_to_opnd: Dict[IndexExpr, lang.OperandDef], - expr_to_info: _ExprInfoDict, - ) -> lang.ScalarExpression: - """Emits the expression tree and returns the expression.""" - # The current expression node is an internal node of the structured op. - if self not in expr_to_opnd: - a = self.a._emit_expression(expr_to_opnd, expr_to_info) - b = self.b._emit_expression(expr_to_opnd, expr_to_info) - return _op_to_callable(self.op)(a, b) - - # The current expression is a leaf node of the structured op. That is, it is - # a temporary tensor generated by its child structured op. - op_info = expr_to_info[self].structop_info - assert op_info is not None - dims = _mlir_dimensions_from_index_vars(op_info.dst_indices) - return lang.TensorUse(expr_to_opnd[self], dims) - - def _visit( - self, func: _ExprVisitor, args, *, leaf_checker: _SubtreeLeafChecker = None - ) -> None: - """A post-order visitor.""" - if leaf_checker is None or not leaf_checker(self, *args): - self.a._visit(func, args, leaf_checker=leaf_checker) - self.b._visit(func, args, leaf_checker=leaf_checker) - func(self, *args) - - def dtype(self) -> DType: - """Returns the data type of the binary operation.""" - return self.a.dtype() - - -def _validate_and_collect_dim_info( - index_to_dim_info: Dict[IndexVar, _DimInfo], - indices: Tuple[IndexVar, ...], - dim_infos: Tuple[_DimInfo, ...], - expr: _BinaryExpr, -) -> None: - """Validates and collects the dimension information for an index notation. - - Validates (indices, dim_infos) against the information collected from other - source operands and is represented by index_to_dim_info. In particular, we - ensure that each IndexVar corresponds to only one dimension size. We also - aggregate the new information represented in (indices, dim_infos) to - index_to_dim_info. 
- - Args: - index_to_dim: A dictionary of (IndexVar, _DimInfo) collected from the - previous operands. - indices: The IndexVars to be validated. - dim_infos: The dimension information for the IndexVars to be validated. - expr: The binary expression where (indices, dim_infos) is used. - - Raises: - ValueError if there is any problem in the IndexVars or dimensional values. - """ - assert len(indices) == len(dim_infos) - for i, d in zip(indices, dim_infos): - if i not in index_to_dim_info: - index_to_dim_info[i] = d - else: - dim = index_to_dim_info[i].dim - if dim == -1 or d.dim == -1: - dim = dim if dim != -1 else d.dim - elif dim != d.dim: - raise ValueError( - f"Inconsistent source dimension for {i}: " f"{d.dim} vs {dim}" - ) - mode_format = _mode_format_estimator(expr.op)( - index_to_dim_info[i].mode_format, d.mode_format - ) - index_to_dim_info[i] = _DimInfo(d.dim, mode_format) - - -def _validate_and_collect_expr_info( - expr: IndexExpr, - expr_to_info: _ExprInfoDict, -) -> None: - """Validates dimension information and constructs _ExprInfo. - - Validates that dimensional values for the same IndexVar are the same. Collects - a list of IndexVar used by the expression and their corresponding dimensional - values. Constructs an _ExprInfo object to record the information for the - IndexExpr. - - This routine is passed to the post-order visitor as an _ExprVisitor object. - - Args: - expr: The IndexExpr being validated. - expr_to_info: The dictionary of (IndexExpr, _ExprInfo) for recording the - expression information. - - Raises: - ValueError if there is any problem in the IndexVars or dimensional values. - """ - # Objects of class Access can be shared by different expressions. Avoid - # processing Access objects multiple times by skipping the processing if expr - # is already in the dictionary. - if expr in expr_to_info: - return - - if isinstance(expr, IndexVar): - src_indices = (expr,) # A tuple with one element. - dim_infos = (_get_dummy_dim_info(),) # A tuple with one element. - elif isinstance(expr, Access): - src_indices = expr.indices - src_dims = tuple(expr.tensor.shape) - if expr.tensor.format is None: - # Treat each dimension of a dense tensor as DENSE for the purpose of - # calculating temporary tensor storage format. - mode_formats = tuple([ModeFormat.DENSE] * len(src_dims)) - else: - mode_formats = tuple(expr.tensor.format.format_pack.formats) - assert len(src_dims) == len(mode_formats) - dim_infos = tuple([_DimInfo(d, m) for d, m in zip(src_dims, mode_formats)]) - elif isinstance(expr, _UnaryExpr): - a_info = expr_to_info[expr.a] - index_to_dim_info = {i: d for i, d in zip(a_info.src_indices, a_info.dim_infos)} - # Here we rely on the fact that dictionaries keep the insertion order for - # keys and values. - src_indices = tuple(index_to_dim_info.keys()) - dim_infos = tuple(index_to_dim_info.values()) - else: - assert isinstance(expr, _BinaryExpr) - a_info = expr_to_info[expr.a] - index_to_dim_info = {i: d for i, d in zip(a_info.src_indices, a_info.dim_infos)} - b_info = expr_to_info[expr.b] - _validate_and_collect_dim_info( - index_to_dim_info, b_info.src_indices, b_info.dim_infos, expr - ) - # Here we rely on the fact that dictionaries keep the insertion order for - # keys and values. 
- src_indices = tuple(index_to_dim_info.keys()) - dim_infos = tuple(index_to_dim_info.values()) - - expr_to_info[expr] = _ExprInfo(src_indices, dim_infos) - - -def _mark_structured_op_root( - expr: IndexExpr, - reduce_index: IndexVar, - expr_to_info: _ExprInfoDict, -) -> None: - """Identifies the root expression for a structured op in the linalg dialect. - - An linalg structured op can only perform reduction on the whole expression. - For a TACO tensor algebra expression, the reduction on an IndexVar is done at - the smallest expression that contains all the uses of the IndexVar. If such an - expression is only part of the whole expression, we need to split this - sub-expression tree out from its parent and implement the sub-expression as a - structured op. - - This routine identifies the root expression node for performing a reduction on - the given IndexVar. If the reduction of the given IndexVar should be performed - on expression X, then the IndexVar is added to expr_to_info[X].reduce_indices - - Args: - expr: The root IndexExpr for the tensor algebra expression. - reduce_index: The IndexVar which we want to find out the proper expression - to perform a reduction. - expr_to_info: The dictionary to look up _ExprInfo for IndexExpr. - - Raises: - ValueError: If the expression is not proper or not supported. - """ - expr_info = expr_to_info[expr] - if isinstance(expr, Access): - # Handle simple reduction expression in the format of A[i] = B[i, j]. - if reduce_index in expr_info.src_indices: - expr_info.reduce_indices.add(reduce_index) - return - elif isinstance(expr, IndexVar): - # A[i] = B[i] + j is not allowed. - raise ValueError(f"IndexVar is not part of the iteration domain: {expr}.") - - assert isinstance(expr, _BinaryExpr) - a_info = expr_to_info[expr.a] - b_info = expr_to_info[expr.b] - - if reduce_index in a_info.src_indices and reduce_index in b_info.src_indices: - expr_info.reduce_indices.add(reduce_index) - return - - if reduce_index in a_info.src_indices: - _mark_structured_op_root(expr.a, reduce_index, expr_to_info) - elif reduce_index in b_info.src_indices: - _mark_structured_op_root(expr.b, reduce_index, expr_to_info) - else: - assert False, "Unreachable path" - - -def _accumulate_reduce_indices( - expr: IndexExpr, - expr_to_info: _ExprInfoDict, -) -> None: - """Propagates reduction indices from child expressions to parent expressions. - - This routine is passed to the post-order visitor as an _ExprVisitor object. - - Args: - expr: The IndexExpr being visited. - expr_to_info: The dictionary of (IndexExpr, _ExprInfo) for recording the - expression information. - """ - assert expr in expr_to_info - expr_info = expr_to_info[expr] - - if isinstance(expr, _BinaryExpr): - a_info = expr_to_info[expr.a] - b_info = expr_to_info[expr.b] - expr_info.acc_reduce_indices = ( - a_info.acc_reduce_indices - | b_info.acc_reduce_indices - | expr_info.reduce_indices - ) - elif isinstance(expr, _UnaryExpr): - a_info = expr_to_info[expr.a] - expr_info.acc_reduce_indices = ( - a_info.acc_reduce_indices | expr_info.reduce_indices - ) - elif isinstance(expr, IndexVar): - # If an IndexVar is reducing itself, it means the IndexVar is outside the - # iteration domain. This usage is now allowed and we should emit an error - # before reaching here. - assert not expr_info.reduce_indices - else: - assert isinstance(expr, Access) - # Handle simple reduction expression in the format of A[i] = B[i, j]. 
- expr_info.acc_reduce_indices = expr_info.reduce_indices - - -def _gather_structured_op( - expr: IndexExpr, - expr_to_info: _ExprInfoDict, - structop_roots: List[IndexExpr], -) -> None: - """Adds structured op root expression information to structop_roots. - - This routine is passed to the post-order visitor as an _ExprVisitor object. - - Args: - expr: The IndexExpr being visited. - expr_to_info: The dictionary to look up _ExprInfo for IndexExpr. - structop_roots: The resulting list of IndexExpr that are the roots for - linalg structured ops. - """ - if not expr_to_info[expr].reduce_indices: - return - - # If the expression is the root for reducing some indices, collect the indices - # and dimensions for the reduction result. - dst_indices = [] - dst_dims = [] - mode_fmts = [] - for i, d in zip(expr_to_info[expr].src_indices, expr_to_info[expr].dim_infos): - if i not in expr_to_info[expr].acc_reduce_indices: - dst_indices.append(i) - dst_dims.append(d.dim) - mode_fmts.append(d.mode_format) - - # Add the information to the dictionary. - op_info = _StructOpInfo( - tuple(dst_indices), - tuple(dst_dims), - expr.dtype(), - f"temp{len(structop_roots)}", - _make_format(mode_fmts), - ) - expr_to_info[expr].structop_info = op_info - - # Add the expression to the list of structured op roots. - structop_roots.append(expr) - - -def _is_structured_op_leaf( - expr: IndexExpr, - root: IndexExpr, - expr_to_info: _ExprInfoDict, - *unused_args, -) -> bool: - """Returns true iff the expression is a leaf node for a structured op. - - The root of a structured op is a leaf of its parent structured op that uses - its result. An expression node is a leaf node for the current structured op if - it is an Access node or the root for a structured op that is not the current - structured op. - - This routine is passed to the post-order visitor as a _SubtreeLeafChecker - object. Because the post-order visitor pass the same parameters to both - _SubtreeLeafChecker and _ExprVisitor, this routine may received unused - parameters. - - Args: - expr: The IndexExpr being visited. - root: The root of the current structured op. - expr_to_info: The dictionary to look up _ExprInfo for IndexExpr. - - Returns: - True if the current IndexExpr is a leaf for the current structured op. - """ - return ( - (expr != root and expr_to_info[expr].structop_info is not None) - or isinstance(expr, Access) - or isinstance(expr, IndexVar) - ) - - -def _gather_structured_op_input( - expr: IndexExpr, - root: IndexExpr, - expr_to_info: _ExprInfoDict, - structop_inputs: List[IndexExpr], -) -> None: - """Adds the IndexExpr to structop_inputs if it is an input. - - If the current IndexExpr is an input for the current structured op, adds it to - structop_inputs. The current IndexExpr is an input if it is an Access node or - if it is the root for a structured op that is not the current structured op. - - This routine is passed to the post-order visitor as an _ExprVisitor object. - - Args: - expr: The IndexExpr being visited. - root: The root of the current structured op. - expr_to_info: The dictionary to look up _ExprInfo for IndexExpr. - structop_inputs: The resulting list of IndexExpr that provide input to the - current structured op. 
- """ - if ( - (expr != root or isinstance(expr, Access)) and expr not in structop_inputs - ) and ( - isinstance(expr, Access) - or (expr in expr_to_info and expr_to_info[expr].structop_info) - ): - structop_inputs.append(expr) - - -def _emit_structured_op_input( - expr: IndexExpr, - expr_to_info: _ExprInfoDict, - op_def: lang.LinalgOpDef, -) -> lang.OperandDef: - """Emits OperandDef in the linalg dialect for the input IndexExpr. - - Args: - expr: The input IndexExpr for the current structured op. - expr_to_info: The dictionary to look up _ExprInfo for IndexExpr. - op_def: The linalg operation for the current structured op. - - Returns: - An OperandDef in the linalg dialect for the input IndexExpr. - """ - op_info = expr_to_info[expr].structop_info - if op_info and not isinstance(expr, Access): - # The input is a temporary tensor produced by another structured op. - indices = op_info.dst_indices - name = op_info.dst_name - else: - # The input is a user provided tensor. - assert isinstance(expr, Access) - indices = expr.indices - name = expr.tensor.name - - dim_sym = _mlir_symbols_from_index_vars(indices) - opnd = lang.OperandDef(lang.OperandKind.INPUT_TENSOR, lang.T, dim_sym) - op_def.add_operand(name, opnd) - return opnd - - -def _check_and_build_unary(a: Access, op: _UnaryOp) -> "_UnaryExpr": - """Build a unary operation ceil. - - Args: - a: The operand, which could be any Python object from user inputs. - op: An _UnaryOp object representing the operation. - - Returns: - A _UnaryExpr object representing the operation. - - Raises: - ValueError: If a is not an IndexExpr. - """ - if not isinstance(a, Access): - raise ValueError(f"Expected an Access Operand: {a}") - return a._build_unary_expr(op) - - -def ceil(a: Access) -> "_UnaryExpr": - """Defines the operation ceil. - - Args: - a: The operand, which could be any Python object from user inputs. - - Returns: - A _UnaryExpr object representing the operation. - - Raises: - ValueError: If a is not an IndexExpr. - """ - return _check_and_build_unary(a, _op_ceil) - - -def floor(a: Access) -> "_UnaryExpr": - """Defines the operation floor. - - Args: - a: The operand, which could be any Python object from user inputs. - - Returns: - A _UnaryExpr object representing the operation. - - Raises: - ValueError: If a is not an IndexExpr. - """ - return _check_and_build_unary(a, _op_floor) diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco_api.py b/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco_api.py deleted file mode 100644 index d11eb76edca93..0000000000000 --- a/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco_api.py +++ /dev/null @@ -1,53 +0,0 @@ -# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - -"""Supports the PyTACO API with the MLIR-PyTACO implementation. - -See http://tensor-compiler.org/ for TACO tensor compiler. - -This module exports the MLIR-PyTACO implementation through the language defined -by PyTACO. In particular, it defines the function and type aliases and constants -needed for the PyTACO API to support the execution of PyTACO programs using the -MLIR-PyTACO implementation. -""" - -from . import mlir_pytaco -from . import mlir_pytaco_io - -# Functions defined by PyTACO API. 
-ceil = mlir_pytaco.ceil -floor = mlir_pytaco.floor -get_index_vars = mlir_pytaco.get_index_vars -from_array = mlir_pytaco.Tensor.from_array -read = mlir_pytaco_io.read -write = mlir_pytaco_io.write - -# Classes defined by PyTACO API. -dtype = mlir_pytaco.DType -mode_format = mlir_pytaco.ModeFormat -mode_ordering = mlir_pytaco.ModeOrdering -mode_format_pack = mlir_pytaco.ModeFormatPack -format = mlir_pytaco.Format -index_var = mlir_pytaco.IndexVar -tensor = mlir_pytaco.Tensor -index_expression = mlir_pytaco.IndexExpr -access = mlir_pytaco.Access - -# Data type constants defined by PyTACO API. -int8 = mlir_pytaco.DType(mlir_pytaco.Type.INT8) -int16 = mlir_pytaco.DType(mlir_pytaco.Type.INT16) -int32 = mlir_pytaco.DType(mlir_pytaco.Type.INT32) -int64 = mlir_pytaco.DType(mlir_pytaco.Type.INT64) -float16 = mlir_pytaco.DType(mlir_pytaco.Type.FLOAT16) -float32 = mlir_pytaco.DType(mlir_pytaco.Type.FLOAT32) -float64 = mlir_pytaco.DType(mlir_pytaco.Type.FLOAT64) -complex64 = mlir_pytaco.DType(mlir_pytaco.Type.COMPLEX64) -complex128 = mlir_pytaco.DType(mlir_pytaco.Type.COMPLEX128) - -# Storage format constants defined by the PyTACO API. In PyTACO, each storage -# format constant has two aliasing names. -compressed = mlir_pytaco.ModeFormat.COMPRESSED -Compressed = mlir_pytaco.ModeFormat.COMPRESSED -dense = mlir_pytaco.ModeFormat.DENSE -Dense = mlir_pytaco.ModeFormat.DENSE diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco_io.py b/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco_io.py deleted file mode 100644 index 785401c25dc87..0000000000000 --- a/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco_io.py +++ /dev/null @@ -1,82 +0,0 @@ -# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - -"""Experimental MLIR-PyTACO with sparse tensor support. - -See http://tensor-compiler.org/ for TACO tensor compiler. - -This module implements the PyTACO API for writing a tensor to a file or reading -a tensor from a file. - -See the following links for Matrix Market Exchange (.mtx) format and FROSTT -(.tns) format: - https://math.nist.gov/MatrixMarket/formats.html - http://frostt.io/tensors/file-formats.html -""" - -from typing import List, TextIO - -from . import mlir_pytaco - -# Define the type aliases so that we can write the implementation here as if -# it were part of mlir_pytaco.py. -Tensor = mlir_pytaco.Tensor -Format = mlir_pytaco.Format -DType = mlir_pytaco.DType -Type = mlir_pytaco.Type - -# Constants used in the implementation. -_MTX_FILENAME_SUFFIX = ".mtx" -_TNS_FILENAME_SUFFIX = ".tns" - - -def read(filename: str, fmt: Format, dtype: DType = DType(Type.FLOAT32)) -> Tensor: - """Inputs a tensor from a given file. - - The name suffix of the file specifies the format of the input tensor. We - currently only support .mtx format for support sparse tensors. - - Args: - filename: A string input filename. - fmt: The storage format of the tensor. - dtype: The data type, default to float32. - - Raises: - ValueError: If filename doesn't end with .mtx or .tns, or fmt is not an - instance of Format or fmt is not a sparse tensor. - """ - if not isinstance(filename, str) or ( - not filename.endswith(_MTX_FILENAME_SUFFIX) - and not filename.endswith(_TNS_FILENAME_SUFFIX) - ): - raise ValueError( - "Expected string filename ends with " - f"{_MTX_FILENAME_SUFFIX} or {_TNS_FILENAME_SUFFIX}: " - f"{filename}." 
- ) - - return Tensor.from_file(filename, fmt, dtype) - - -def write(filename: str, tensor: Tensor) -> None: - """Outputs a tensor to a given file. - - The name suffix of the file specifies the format of the output. We currently - only support .tns format. - - Args: - filename: A string output filename. - tensor: The tensor to output. - - Raises: - ValueError: If filename doesn't end with .tns or tensor is not a Tensor. - """ - if not isinstance(filename, str) or not filename.endswith(_TNS_FILENAME_SUFFIX): - raise ValueError( - "Expected string filename ends with" f" {_TNS_FILENAME_SUFFIX}: {filename}." - ) - if not isinstance(tensor, Tensor): - raise ValueError(f"Expected a Tensor object: {tensor}.") - - tensor.to_file(filename) diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco_utils.py b/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco_utils.py deleted file mode 100644 index 1e1061b8b858d..0000000000000 --- a/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco_utils.py +++ /dev/null @@ -1,424 +0,0 @@ -# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - -# This file contains the utilities to process sparse tensor outputs. - -from typing import Callable, Dict, Sequence, Tuple -import ctypes -import functools -import numpy as np -import os - -# Import MLIR related modules. -from mlir import execution_engine -from mlir import ir -from mlir import runtime -from mlir.dialects import sparse_tensor - -from . import mlir_sparse_compiler - -# Type aliases for type annotation. -_SupportFunc = Callable[..., None] -_SupportFuncLocator = Callable[[np.dtype], Tuple[_SupportFunc, _SupportFunc]] - -# The name for the environment variable that provides the full path for the -# supporting library. -_SUPPORTLIB_ENV_VAR = "SUPPORTLIB" -# The default supporting library if the environment variable is not provided. -_DEFAULT_SUPPORTLIB = "libmlir_c_runner_utils.so" - -# The JIT compiler optimization level. -_OPT_LEVEL = 2 -# The entry point to the JIT compiled program. -_ENTRY_NAME = "main" - - -@functools.lru_cache() -def _get_support_lib_name() -> str: - """Gets the string name for the supporting C shared library.""" - return os.getenv(_SUPPORTLIB_ENV_VAR, _DEFAULT_SUPPORTLIB) - - -@functools.lru_cache() -def _get_sparse_compiler() -> mlir_sparse_compiler.SparseCompiler: - """Gets the MLIR sparse compiler with default setting.""" - return mlir_sparse_compiler.SparseCompiler( - options="", opt_level=_OPT_LEVEL, shared_libs=[_get_support_lib_name()] - ) - - -def _record_support_funcs( - ty: np.dtype, - to_func: _SupportFunc, - from_func: _SupportFunc, - ty_to_funcs: Dict[np.dtype, Tuple[_SupportFunc, _SupportFunc]], -) -> None: - """Records the two supporting functions for a given data type.""" - to_func.restype = ctypes.c_void_p - from_func.restype = ctypes.c_void_p - ty_to_funcs[ty] = (to_func, from_func) - - -@functools.lru_cache() -def _get_support_func_locator() -> _SupportFuncLocator: - """Constructs a function to locate the supporting functions for a data type. - - Loads the supporting C shared library with the needed routines. Constructs a - dictionary from the supported data types to the routines for the data types, - and then a function to look up the dictionary for a given data type. 
- - The name of the supporting C shared library is either provided by an - an environment variable or a default value. - - Returns: - The function to look up the supporting functions for a given data type. - - Raises: - OSError: If there is any problem in loading the shared library. - ValueError: If the shared library doesn't contain the needed routines. - """ - # This raises OSError exception if there is any problem in loading the shared - # library. - c_lib = ctypes.CDLL(_get_support_lib_name()) - - type_to_funcs = {} - try: - support_types = [ - ( - np.int8, - c_lib.convertToMLIRSparseTensorI8, - c_lib.convertFromMLIRSparseTensorI8, - ), - ( - np.int16, - c_lib.convertToMLIRSparseTensorI16, - c_lib.convertFromMLIRSparseTensorI16, - ), - ( - np.int32, - c_lib.convertToMLIRSparseTensorI32, - c_lib.convertFromMLIRSparseTensorI32, - ), - ( - np.int64, - c_lib.convertToMLIRSparseTensorI64, - c_lib.convertFromMLIRSparseTensorI64, - ), - ( - np.float16, - c_lib.convertToMLIRSparseTensorF16, - c_lib.convertFromMLIRSparseTensorF16, - ), - ( - np.float32, - c_lib.convertToMLIRSparseTensorF32, - c_lib.convertFromMLIRSparseTensorF32, - ), - ( - np.float64, - c_lib.convertToMLIRSparseTensorF64, - c_lib.convertFromMLIRSparseTensorF64, - ), - ( - np.complex64, - c_lib.convertToMLIRSparseTensorC32, - c_lib.convertFromMLIRSparseTensorC32, - ), - ( - np.complex128, - c_lib.convertToMLIRSparseTensorC64, - c_lib.convertFromMLIRSparseTensorC64, - ), - ] - except Exception as e: - raise ValueError(f"Missing supporting function: {e}") from e - for i, info in enumerate(support_types): - _record_support_funcs(info[0], info[1], info[2], type_to_funcs) - - def get_support_funcs(ty: np.dtype): - funcs = type_to_funcs[ty] - assert funcs is not None - return funcs - - return get_support_funcs - - -def sparse_tensor_to_coo_tensor( - sparse_tensor: ctypes.c_void_p, - dtype: np.dtype, -) -> Tuple[int, int, np.ndarray, np.ndarray, np.ndarray]: - """Converts an MLIR sparse tensor to a COO-flavored format tensor. - - Args: - sparse_tensor: A ctypes.c_void_p to the MLIR sparse tensor descriptor. - dtype: The numpy data type for the tensor elements. - - Returns: - A tuple that contains the following values for the COO-flavored format - tensor: - rank: An integer for the rank of the tensor. - nse: An integer for the number of non-zero values in the tensor. - shape: A 1D numpy array of integers, for the shape of the tensor. - values: A 1D numpy array, for the non-zero values in the tensor. - indices: A 2D numpy array of integers, representing the indices for the - non-zero values in the tensor. - - Raises: - OSError: If there is any problem in loading the shared library. - ValueError: If the shared library doesn't contain the needed routines. - """ - convert_from = _get_support_func_locator()(dtype)[1] - rank = ctypes.c_ulonglong(0) - nse = ctypes.c_ulonglong(0) - shape = ctypes.POINTER(ctypes.c_ulonglong)() - - values = ctypes.POINTER(runtime.as_ctype(np.dtype(dtype)))() - indices = ctypes.POINTER(ctypes.c_ulonglong)() - convert_from( - sparse_tensor, - ctypes.byref(rank), - ctypes.byref(nse), - ctypes.byref(shape), - ctypes.byref(values), - ctypes.byref(indices), - ) - - # Convert the returned values to the corresponding numpy types. 
- shape = np.ctypeslib.as_array(shape, shape=[rank.value]) - values = runtime.to_numpy(np.ctypeslib.as_array(values, shape=[nse.value])) - indices = np.ctypeslib.as_array(indices, shape=[nse.value, rank.value]) - return rank.value, nse.value, shape, values, indices - - -def coo_tensor_to_sparse_tensor( - np_shape: np.ndarray, - np_values: np.ndarray, - np_indices: np.ndarray, - np_perm: np.ndarray, - np_sparse: np.ndarray, -) -> int: - """Converts a COO-flavored format sparse tensor to an MLIR sparse tensor. - - Args: - np_shape: A 1D numpy array of integers, for the shape of the tensor. - np_values: A 1D numpy array, for the non-zero values in the tensor. - np_indices: A 2D numpy array of integers, representing the indices for the - non-zero values in the tensor. - np_perm: A 1D numpy array of integers, representing the storage ordering - for the dimensions. - np_sparse: A 1D numpy array of uint8, representing the sparsity values - for the dimensions. - - Returns: - An integer for the non-null ctypes.c_void_p to the MLIR sparse tensor - descriptor. - - Raises: - OSError: If there is any problem in loading the shared library. - ValueError: If the shared library doesn't contain the needed routines. - """ - - r = len(np_shape) - rank = ctypes.c_ulonglong(r) - nse = ctypes.c_ulonglong(len(np_values)) - shape = np_shape.ctypes.data_as(ctypes.POINTER(ctypes.c_ulonglong)) - values = np_values.ctypes.data_as( - ctypes.POINTER(runtime.as_ctype(np.dtype(np_values.dtype))) - ) - indices = np_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_ulonglong)) - - perm = np_perm.ctypes.data_as(ctypes.POINTER(ctypes.c_ulonglong)) - sparse = np_sparse.ctypes.data_as(ctypes.POINTER(ctypes.c_uint8)) - - convert_to = _get_support_func_locator()(np_values.dtype.type)[0] - ptr = convert_to(rank, nse, shape, values, indices, perm, sparse) - assert ptr is not None, "Problem with calling convertToMLIRSparseTensorF64" - return ptr - - -def compile_and_build_engine(module: ir.Module) -> execution_engine.ExecutionEngine: - """Compiles an MLIR module and builds a JIT execution engine. - - Args: - module: The MLIR module. - - Returns: - A JIT execution engine for the MLIR module. - - """ - return _get_sparse_compiler().compile_and_jit(module) - - -class _SparseTensorDescriptor(ctypes.Structure): - """A C structure for an MLIR sparse tensor.""" - - _fields_ = [ - # A pointer for the MLIR sparse tensor storage. - ("storage", ctypes.POINTER(ctypes.c_ulonglong)), - # An MLIR MemRef descriptor for the shape of the sparse tensor. - ("shape", runtime.make_nd_memref_descriptor(1, ctypes.c_ulonglong)), - ] - - -def _output_one_dim(dim: int, rank: int, shape: str, type: str) -> str: - """Produces the MLIR text code to output the size for the given dimension.""" - return f""" - %c{dim} = arith.constant {dim} : index - %d{dim} = tensor.dim %t, %c{dim} : tensor<{shape}x{type}, #enc> - memref.store %d{dim}, %b[%c{dim}] : memref<{rank}xindex> -""" - - -# TODO: With better support from MLIR, we may improve the current implementation -# by doing the following: -# (1) Use Python code to generate the kernel instead of doing MLIR text code -# stitching. -# (2) Use scf.for instead of an unrolled loop to write out the dimension sizes -# when tensor.dim supports non-constant dimension value. -def _get_create_sparse_tensor_kernel( - sparsity_codes: Sequence[sparse_tensor.DimLevelType], type: str -) -> str: - """Creates an MLIR text kernel to contruct a sparse tensor from a file. - - The kernel returns a _SparseTensorDescriptor structure. 
- """ - rank = len(sparsity_codes) - - # Use ? to represent a dimension in the dynamic shape string representation. - shape = "x".join(map(lambda d: "?", range(rank))) - - # Convert the encoded sparsity values to a string representation. - sparsity = ", ".join( - map(lambda s: '"compressed"' if s.value else '"dense"', sparsity_codes) - ) - - # Get the MLIR text code to write the dimension sizes to the output buffer. - output_dims = "\n".join( - map(lambda d: _output_one_dim(d, rank, shape, type), range(rank)) - ) - - # Return the MLIR text kernel. - return f""" -!Ptr = !llvm.ptr -#enc = #sparse_tensor.encoding<{{ - lvlTypes = [ {sparsity} ] -}}> -func.func @{_ENTRY_NAME}(%filename: !Ptr) -> (tensor<{shape}x{type}, #enc>, memref<{rank}xindex>) -attributes {{ llvm.emit_c_interface }} {{ - %t = sparse_tensor.new %filename : !Ptr to tensor<{shape}x{type}, #enc> - %b = memref.alloc() : memref<{rank}xindex> - {output_dims} - return %t, %b : tensor<{shape}x{type}, #enc>, memref<{rank}xindex> -}}""" - - -def create_sparse_tensor( - filename: str, sparsity: Sequence[sparse_tensor.DimLevelType], type: str -) -> Tuple[ctypes.c_void_p, np.ndarray]: - """Creates an MLIR sparse tensor from the input file. - - Args: - filename: A string for the name of the file that contains the tensor data in - a COO-flavored format. - sparsity: A sequence of DimLevelType values, one for each dimension of the - tensor. - - Returns: - A Tuple containing the following values: - storage: A ctypes.c_void_p for the MLIR sparse tensor storage. - shape: A 1D numpy array of integers, for the shape of the tensor. - - Raises: - OSError: If there is any problem in loading the supporting C shared library. - ValueError: If the shared library doesn't contain the needed routine. - """ - with ir.Context() as ctx, ir.Location.unknown(): - module = _get_create_sparse_tensor_kernel(sparsity, type) - module = ir.Module.parse(module) - engine = compile_and_build_engine(module) - - # A sparse tensor descriptor to receive the kernel result. - c_tensor_desc = _SparseTensorDescriptor() - # Convert the filename to a byte stream. - c_filename = ctypes.c_char_p(bytes(filename, "utf-8")) - - arg_pointers = [ - ctypes.byref(ctypes.pointer(c_tensor_desc)), - ctypes.byref(c_filename), - ] - - # Invoke the execution engine to run the module and return the result. - engine.invoke(_ENTRY_NAME, *arg_pointers) - shape = runtime.ranked_memref_to_numpy(ctypes.pointer(c_tensor_desc.shape)) - return c_tensor_desc.storage, shape - - -# TODO: With better support from MLIR, we may improve the current implementation -# by using Python code to generate the kernel instead of doing MLIR text code -# stitching. -def _get_output_sparse_tensor_kernel( - sparsity_codes: Sequence[sparse_tensor.DimLevelType], type: str -) -> str: - """Creates an MLIR text kernel to output a sparse tensor to a file. - - The kernel returns void. - """ - rank = len(sparsity_codes) - - # Use ? to represent a dimension in the dynamic shape string representation. - shape = "x".join(map(lambda d: "?", range(rank))) - - # Convert the encoded sparsity values to a string representation. - sparsity = ", ".join( - map(lambda s: '"compressed"' if s.value else '"dense"', sparsity_codes) - ) - - # Return the MLIR text kernel. 
- return f""" -!Ptr = !llvm.ptr -#enc = #sparse_tensor.encoding<{{ - lvlTypes = [ {sparsity} ] -}}> -func.func @{_ENTRY_NAME}(%t: tensor<{shape}x{type}, #enc>, %filename: !Ptr) -attributes {{ llvm.emit_c_interface }} {{ - sparse_tensor.out %t, %filename : tensor<{shape}x{type}, #enc>, !Ptr - func.return -}}""" - - -def output_sparse_tensor( - tensor: ctypes.c_void_p, - filename: str, - sparsity: Sequence[sparse_tensor.DimLevelType], - type: str, -) -> None: - """Outputs an MLIR sparse tensor to the given file. - - Args: - tensor: A C pointer to the MLIR sparse tensor. - filename: A string for the name of the file that contains the tensor data in - a COO-flavored format. - sparsity: A sequence of DimLevelType values, one for each dimension of the - tensor. - type: The MLIR string for the data type. - - Raises: - OSError: If there is any problem in loading the supporting C shared library. - ValueError: If the shared library doesn't contain the needed routine. - """ - with ir.Context() as ctx, ir.Location.unknown(): - module = _get_output_sparse_tensor_kernel(sparsity, type) - module = ir.Module.parse(module) - engine = compile_and_build_engine(module) - - # Convert the filename to a byte stream. - c_filename = ctypes.c_char_p(bytes(filename, "utf-8")) - - arg_pointers = [ - ctypes.byref(ctypes.cast(tensor, ctypes.c_void_p)), - ctypes.byref(c_filename), - ] - - # Invoke the execution engine to run the module and return the result. - engine.invoke(_ENTRY_NAME, *arg_pointers) diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_sparse_compiler.py b/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_sparse_compiler.py deleted file mode 100644 index 8f193b81bb07c..0000000000000 --- a/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_sparse_compiler.py +++ /dev/null @@ -1,41 +0,0 @@ -# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - -# This file contains the sparse compiler class. It is copied from -# test/Integration/Dialect/SparseTensor/python/ until we have a better -# solution. 
- -from mlir import execution_engine -from mlir import ir -from mlir import passmanager -from typing import Sequence - - -class SparseCompiler: - """Sparse compiler class for compiling and building MLIR modules.""" - - def __init__(self, options: str, opt_level: int, shared_libs: Sequence[str]): - pipeline = f"builtin.module(sparse-compiler{{{options} reassociate-fp-reductions=1 enable-index-optimizations=1}})" - self.pipeline = pipeline - self.opt_level = opt_level - self.shared_libs = shared_libs - - def __call__(self, module: ir.Module): - """Convenience application method.""" - self.compile(module) - - def compile(self, module: ir.Module): - """Compiles the module by invoking the sparse copmiler pipeline.""" - passmanager.PassManager.parse(self.pipeline).run(module.operation) - - def jit(self, module: ir.Module) -> execution_engine.ExecutionEngine: - """Wraps the module in a JIT execution engine.""" - return execution_engine.ExecutionEngine( - module, opt_level=self.opt_level, shared_libs=self.shared_libs - ) - - def compile_and_jit(self, module: ir.Module) -> execution_engine.ExecutionEngine: - """Compiles and jits the module.""" - self.compile(module) - return self.jit(module) diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/tools/testing_utils.py b/mlir/test/Integration/Dialect/SparseTensor/taco/tools/testing_utils.py deleted file mode 100644 index 1be88fa8bd709..0000000000000 --- a/mlir/test/Integration/Dialect/SparseTensor/taco/tools/testing_utils.py +++ /dev/null @@ -1,47 +0,0 @@ -# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - -# This file contains the utilities to support testing. - -import numpy as np - - -def compare_sparse_tns(expected: str, actual: str, rtol: float = 0.0001) -> bool: - """Compares sparse tensor actual output file with expected output file. - - This routine assumes the input files are in FROSTT format. See - http://frostt.io/tensors/file-formats.html for FROSTT (.tns) format. - - It also assumes the first line in the output file is a comment line. - - """ - with open(actual, "r") as actual_f: - with open(expected, "r") as expected_f: - # Skip the first comment line. 
- _ = actual_f.readline() - _ = expected_f.readline() - - # Compare the two lines of meta data - if ( - actual_f.readline() != expected_f.readline() - or actual_f.readline() != expected_f.readline() - ): - return FALSE - - actual_data = np.loadtxt(actual, np.float64, skiprows=3) - expected_data = np.loadtxt(expected, np.float64, skiprows=3) - return np.allclose(actual_data, expected_data, rtol=rtol) - - -def file_as_string(file: str) -> str: - """Returns contents of file as string.""" - with open(file, "r") as f: - return f.read() - - -def run_test(f): - """Prints the test name and runs the test.""" - print(f.__name__) - f() - return f diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/unit_test_tensor_core.py b/mlir/test/Integration/Dialect/SparseTensor/taco/unit_test_tensor_core.py deleted file mode 100644 index 45ce446478dee..0000000000000 --- a/mlir/test/Integration/Dialect/SparseTensor/taco/unit_test_tensor_core.py +++ /dev/null @@ -1,647 +0,0 @@ -# RUN: env SUPPORTLIB=%mlir_c_runner_utils %PYTHON %s | FileCheck %s - -from string import Template - -import numpy as np -import os -import sys -import tempfile - -_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__)) -sys.path.append(_SCRIPT_PATH) -from tools import mlir_pytaco -from tools import testing_utils as testing_utils - -# Define the aliases to shorten the code. -_COMPRESSED = mlir_pytaco.ModeFormat.COMPRESSED -_DENSE = mlir_pytaco.ModeFormat.DENSE - - -def _init_3d(T, I, J, K): - for i in range(I): - for j in range(J): - for k in range(K): - T.insert([i, j, k], i + j + k + 1) - - -def _init_2d(T, I, J): - for i in range(I): - for j in range(J): - T.insert([i, j], i + j + 1) - - -def _init_1d_with_value(T, I, v): - for i in range(I): - T.insert([i], v) - - -def test_expect_error(name, code, error): - """Executes the code then verifies the expected error message.""" - try: - exec(code) - except ValueError as e: - passed = "passed" if (str(e).startswith(error)) else "failed" - print(f"test_{name}: {passed}") - - -# CHECK-LABEL: test_tensor_dtype -@testing_utils.run_test -def test_tensor_dtype(): - passed = mlir_pytaco.DType(mlir_pytaco.Type.INT16).is_int() - passed += mlir_pytaco.DType(mlir_pytaco.Type.INT32).is_int() - passed += mlir_pytaco.DType(mlir_pytaco.Type.INT64).is_int() - passed += mlir_pytaco.DType(mlir_pytaco.Type.FLOAT32).is_float() - passed += mlir_pytaco.DType(mlir_pytaco.Type.FLOAT64).is_float() - # CHECK: Number of passed: 5 - print("Number of passed:", passed) - - -# CHECK: test_mode_ordering_not_int: passed -test_expect_error( - "mode_ordering_not_int", - "m = mlir_pytaco.ModeOrdering(['x'])", - "Ordering must be a list of integers", -) - -# CHECK: test_mode_ordering_not_permutation: passed -test_expect_error( - "mode_ordering_not_permutation", - "m = mlir_pytaco.ModeOrdering([2, 1])", - "Invalid ordering", -) - -# CHECK: test_mode_format_invalid: passed -test_expect_error( - "mode_format_invalid", - "m = mlir_pytaco.ModeFormatPack(['y'])", - "Formats must be a list of ModeFormat", -) - -# CHECK: test_expect_mode_format_pack: passed -test_expect_error( - "expect_mode_format_pack", - ( - """ -mode_ordering = mlir_pytaco.ModeOrdering([0, 1, 2]) -f = mlir_pytaco.Format(["x"], mode_ordering) - """ - ), - "Expected a list of ModeFormat", -) - -# CHECK: test_expect_mode_ordering: passed -test_expect_error( - "expect_mode_ordering", - ( - """ -mode_format_pack = mlir_pytaco.ModeFormatPack([_COMPRESSED, _COMPRESSED]) -f = mlir_pytaco.Format(mode_format_pack, "x") - """ - ), - "Expected ModeOrdering", -) - -# 
CHECK: test_inconsistent_mode_format_pack_and_mode_ordering: passed -test_expect_error( - "inconsistent_mode_format_pack_and_mode_ordering", - ( - """ -mode_format_pack = mlir_pytaco.ModeFormatPack([_COMPRESSED, _COMPRESSED]) -mode_ordering = mlir_pytaco.ModeOrdering([0, 1, 2]) -f = mlir_pytaco.Format(mode_format_pack, mode_ordering) - """ - ), - "Inconsistent ModeFormatPack and ModeOrdering", -) - - -# CHECK-LABEL: test_format_default_ordering -@testing_utils.run_test -def test_format_default_ordering(): - f = mlir_pytaco.Format([_COMPRESSED, _COMPRESSED]) - passed = 0 - passed += np.array_equal(f.ordering.ordering, [0, 1]) - # CHECK: Number of passed: 1 - print("Number of passed:", passed) - - -# CHECK-LABEL: test_format_explicit_ordering -@testing_utils.run_test -def test_format_explicit_ordering(): - f = mlir_pytaco.Format([_COMPRESSED, _DENSE], [1, 0]) - passed = 0 - passed += np.array_equal(f.ordering.ordering, [1, 0]) - # CHECK: Number of passed: 1 - print("Number of passed:", passed) - - -# CHECK-LABEL: test_index_var -@testing_utils.run_test -def test_index_var(): - i = mlir_pytaco.IndexVar() - j = mlir_pytaco.IndexVar() - passed = i.name != j.name - - vars = mlir_pytaco.get_index_vars(10) - passed += len(vars) == 10 - passed += all([isinstance(e, mlir_pytaco.IndexVar) for e in vars]) - - # CHECK: Number of passed: 3 - print("Number of passed:", passed) - - -# CHECK: test_tensor_invalid_first_argument: passed -test_expect_error( - "tensor_invalid_first_argument", - "t = mlir_pytaco.Tensor('f')", - "Invalid first argument", -) - -# CHECK: test_tensor_inconsistent_shape_and_format: passed -test_expect_error( - "tensor_inconsistent_shape_and_format", - ( - """ -mode_format_pack = mlir_pytaco.ModeFormatPack([_COMPRESSED, _COMPRESSED]) -mode_ordering = mlir_pytaco.ModeOrdering([0, 1]) -f = mlir_pytaco.Format(mode_format_pack, mode_ordering) -t = mlir_pytaco.Tensor([3], f) - """ - ), - "Inconsistent shape and format", -) - -# CHECK: test_tensor_invalid_format: passed -test_expect_error( - "tensor_invalid_format", - "t = mlir_pytaco.Tensor([3], 'f')", - "Invalid format argument", -) - -# CHECK: test_tensor_insert_nonlist_coordinate: passed -test_expect_error( - "tensor_insert_nonlist_coordinate", - ( - """ -t = mlir_pytaco.Tensor([3]) -t.insert(1, 0) - """ - ), - "Non list coordinate detected", -) - -# CHECK: test_tensor_insert_too_much_coordinate: passed -test_expect_error( - "tensor_insert_too_much_coordinate", - ( - """ -t = mlir_pytaco.Tensor([3]) -t.insert([0, 0], 0) - """ - ), - "Invalid coordinate", -) - -# CHECK: test_tensor_insert_coordinate_outof_range: passed -test_expect_error( - "tensor_insert_coordinate_outof_range", - ( - """ -t = mlir_pytaco.Tensor([1, 1]) -t.insert([1, 0], 0) - """ - ), - "Invalid coordinate", -) - -# CHECK: test_tensor_insert_coordinate_nonint: passed -test_expect_error( - "tensor_insert_coordinate_nonint", - ( - """ -t = mlir_pytaco.Tensor([1, 1]) -t.insert([0, "xy"], 0) - """ - ), - "Non integer coordinate detected", -) - -# CHECK: test_tensor_insert_invalid_value: passed -test_expect_error( - "tensor_insert_invalid_value", - ( - """ -t = mlir_pytaco.Tensor([1, 1]) -t.insert([0, 0], "x") - """ - ), - "Value is neither int nor float", -) - -# CHECK: test_access_non_index_var_index: passed -test_expect_error( - "access_non_index_var_index", - ( - """ -t = mlir_pytaco.Tensor([5, 6]) -i = mlir_pytaco.IndexVar() -a = mlir_pytaco.Access(t, (i, "j")) - """ - ), - "Indices contain non IndexVar", -) - -# CHECK: test_access_inconsistent_rank_indices: passed 
-test_expect_error( - "access_inconsistent_rank_indices", - ( - """ -t = mlir_pytaco.Tensor([5, 6]) -i = mlir_pytaco.IndexVar() -a = mlir_pytaco.Access(t, (i,)) - """ - ), - "Invalid indices for rank", -) - -# CHECK: test_access_invalid_indices_for_rank: passed -test_expect_error( - "access_invalid_indices_for_rank", - ( - """ -t = mlir_pytaco.Tensor([5, 6]) -i, j, k = mlir_pytaco.get_index_vars(3) -a = mlir_pytaco.Access(t, (i,j, k)) - """ - ), - "Invalid indices for rank", -) - -# CHECK: test_invalid_indices: passed -test_expect_error( - "invalid_indices", - ( - """ -i, j = mlir_pytaco.get_index_vars(2) -A = mlir_pytaco.Tensor([2, 3]) -B = mlir_pytaco.Tensor([2, 3]) -C = mlir_pytaco.Tensor([2, 3], _DENSE) -C[i, j] = A[1, j] + B[i, j] - """ - ), - "Expected IndexVars", -) - -# CHECK: test_inconsistent_rank_indices: passed -test_expect_error( - "inconsistent_rank_indices", - ( - """ -i, j = mlir_pytaco.get_index_vars(2) -A = mlir_pytaco.Tensor([2, 3]) -C = mlir_pytaco.Tensor([2, 3], _DENSE) -C[i, j] = A[i] - """ - ), - "Invalid indices for rank", -) - -# CHECK: test_destination_index_not_used_in_source: passed -test_expect_error( - "destination_index_not_used_in_source", - ( - """ -i, j = mlir_pytaco.get_index_vars(2) -A = mlir_pytaco.Tensor([3]) -C = mlir_pytaco.Tensor([3], _DENSE) -C[j] = A[i] -C.evaluate() - """ - ), - "Destination IndexVar not used in the source expression", -) - -# CHECK: test_destination_dim_not_consistent_with_source: passed -test_expect_error( - "destination_dim_not_consistent_with_source", - ( - """ -i = mlir_pytaco.IndexVar() -A = mlir_pytaco.Tensor([3]) -C = mlir_pytaco.Tensor([5], _DENSE) -C[i] = A[i] -C.evaluate() - """ - ), - "Inconsistent destination dimension for IndexVar", -) - -# CHECK: test_inconsistent_source_dim: passed -test_expect_error( - "inconsistent_source_dim", - ( - """ -i = mlir_pytaco.IndexVar() -A = mlir_pytaco.Tensor([3]) -B = mlir_pytaco.Tensor([5]) -C = mlir_pytaco.Tensor([3], _DENSE) -C[i] = A[i] + B[i] -C.evaluate() - """ - ), - "Inconsistent source dimension for IndexVar", -) - -# CHECK: test_index_var_outside_domain: passed -test_expect_error( - "index_var_outside_domain", - ( - """ -i, j = mlir_pytaco.get_index_vars(2) -A = mlir_pytaco.Tensor([3]) -B = mlir_pytaco.Tensor([3]) -B[i] = A[i] + j -B.evaluate() - """ - ), - "IndexVar is not part of the iteration domain", -) - - -# CHECK-LABEL: test_tensor_all_dense_sparse -@testing_utils.run_test -def test_tensor_all_dense_sparse(): - a = mlir_pytaco.Tensor([4], [_DENSE]) - passed = not a.is_dense() - passed += a.order == 1 - passed += a.shape[0] == 4 - # CHECK: Number of passed: 3 - print("Number of passed:", passed) - - -# CHECK-LABEL: test_tensor_true_dense -@testing_utils.run_test -def test_tensor_true_dense(): - a = mlir_pytaco.Tensor.from_array(np.random.uniform(size=5)) - passed = a.is_dense() - passed += a.order == 1 - passed += a.shape[0] == 5 - # CHECK: Number of passed: 3 - print("Number of passed:", passed) - - -# CHECK-LABEL: test_tensor_copy -@testing_utils.run_test -def test_tensor_copy(): - i, j = mlir_pytaco.get_index_vars(2) - I = 2 - J = 3 - A = mlir_pytaco.Tensor([I, J]) - A.insert([0, 1], 5.0) - A.insert([1, 2], 6.0) - B = mlir_pytaco.Tensor([I, J]) - B[i, j] = A[i, j] - passed = B._assignment is not None - passed += B._engine is None - try: - B.compute() - except ValueError as e: - passed += str(e).startswith("Need to invoke compile") - B.compile() - passed += B._engine is not None - B.compute() - passed += B._assignment is None - passed += B._engine is None - 
indices, values = B.get_coordinates_and_values() - passed += np.array_equal(indices, [[0, 1], [1, 2]]) - passed += np.allclose(values, [5.0, 6.0]) - # No temporary tensor is used. - passed += B._stats.get_total() == 0 - # CHECK: Number of passed: 9 - print("Number of passed:", passed) - - -# CHECK-LABEL: test_tensor_trivial_reduction -@testing_utils.run_test -def test_tensor_trivial_reduction(): - i, j = mlir_pytaco.get_index_vars(2) - I = 2 - J = 3 - A = mlir_pytaco.Tensor([I, J]) - A.insert([0, 1], 5.0) - A.insert([0, 2], 3.0) - A.insert([1, 2], 6.0) - B = mlir_pytaco.Tensor([I]) - B[i] = A[i, j] - indices, values = B.get_coordinates_and_values() - passed = np.array_equal(indices, [[0], [1]]) - passed += np.allclose(values, [8.0, 6.0]) - # No temporary tensor is used. - passed += B._stats.get_total() == 0 - - # CHECK: Number of passed: 3 - print("Number of passed:", passed) - - -# CHECK-LABEL: test_binary_add -@testing_utils.run_test -def test_binary_add(): - i = mlir_pytaco.IndexVar() - A = mlir_pytaco.Tensor([4]) - B = mlir_pytaco.Tensor([4]) - C = mlir_pytaco.Tensor([4]) - A.insert([1], 10) - A.insert([2], 1) - B.insert([3], 20) - B.insert([2], 2) - C[i] = A[i] + B[i] - indices, values = C.get_coordinates_and_values() - passed = np.array_equal(indices, [[1], [2], [3]]) - passed += np.array_equal(values, [10.0, 3.0, 20.0]) - # No temporary tensor is used. - passed += C._stats.get_total() == 0 - # CHECK: Number of passed: 3 - print("Number of passed:", passed) - - -# CHECK-LABEL: test_binary_add_sub -@testing_utils.run_test -def test_binary_add_sub(): - i = mlir_pytaco.IndexVar() - j = mlir_pytaco.IndexVar() - A = mlir_pytaco.Tensor([2, 3]) - B = mlir_pytaco.Tensor([2, 3]) - C = mlir_pytaco.Tensor([2, 3]) - D = mlir_pytaco.Tensor([2, 3]) - A.insert([0, 1], 10) - A.insert([1, 2], 40) - B.insert([0, 0], 20) - B.insert([1, 2], 30) - C.insert([0, 1], 5) - C.insert([1, 2], 7) - D[i, j] = A[i, j] + B[i, j] - C[i, j] - indices, values = D.get_coordinates_and_values() - passed = np.array_equal(indices, [[0, 0], [0, 1], [1, 2]]) - passed += np.array_equal(values, [20.0, 5.0, 63.0]) - # No temporary tensor is used. - passed += D._stats.get_total() == 0 - # CHECK: Number of passed: 3 - print("Number of passed:", passed) - - -# CHECK-LABEL: test_binary_mul_add -@testing_utils.run_test -def test_binary_mul_add(): - i = mlir_pytaco.IndexVar() - j = mlir_pytaco.IndexVar() - A = mlir_pytaco.Tensor([2, 3]) - B = mlir_pytaco.Tensor([2, 3]) - C = mlir_pytaco.Tensor([2, 3]) - D = mlir_pytaco.Tensor([2, 3]) - A.insert([0, 1], 10) - A.insert([1, 2], 40) - B.insert([0, 0], 20) - B.insert([1, 2], 30) - C.insert([0, 1], 5) - C.insert([1, 2], 7) - D[i, j] = A[i, j] * C[i, j] + B[i, j] - indices, values = D.get_coordinates_and_values() - passed = np.array_equal(indices, [[0, 0], [0, 1], [1, 2]]) - passed += np.array_equal(values, [20.0, 50.0, 310.0]) - # No temporary tensor is used. 
- passed += D._stats.get_total() == 0 - # CHECK: Number of passed: 3 - print("Number of passed:", passed) - - -# CHECK-LABEL: test_binary_add_reduce_at_root -@testing_utils.run_test -def test_binary_add_reduce_at_root(): - i = mlir_pytaco.IndexVar() - j = mlir_pytaco.IndexVar() - A = mlir_pytaco.Tensor([2, 3]) - B = mlir_pytaco.Tensor([2, 3]) - C = mlir_pytaco.Tensor([2], _DENSE) - A.insert([0, 1], 10) - A.insert([1, 2], 40) - B.insert([0, 0], 20) - B.insert([1, 2], 30) - C[i] = A[i, j] + B[i, j] - indices, values = C.get_coordinates_and_values() - passed = np.array_equal(indices, [[0], [1]]) - passed += np.array_equal(values, [30.0, 70.0]) - # No temporary tensor is used. - passed += C._stats.get_total() == 0 - # CHECK: Number of passed: 3 - print("Number of passed:", passed) - - -# CHECK-LABEL: test_binary_add_reduce_at_child -@testing_utils.run_test -def test_binary_add_reduce_at_child(): - i = mlir_pytaco.IndexVar() - j = mlir_pytaco.IndexVar() - I = 2 - J = 3 - A = mlir_pytaco.Tensor([I, J]) - B = mlir_pytaco.Tensor([J]) - C = mlir_pytaco.Tensor([I]) - D = mlir_pytaco.Tensor([I], _DENSE) - - _init_2d(A, I, J) - _init_1d_with_value(C, I, 2) - _init_1d_with_value(B, J, 1) - - D[i] = A[i, j] * B[j] + C[i] - indices, values = D.get_coordinates_and_values() - passed = np.array_equal(indices, [[0], [1]]) - passed += np.array_equal(values, [8.0, 11.0]) - - # The expression is implemented as: - # temp0[i] = A[i, j] * B[i] - # D[i] = temp0[i] + C[i] - # Check the temporary tensor introduced by the implementation. - stats = D._stats - passed += stats.get_total() == 1 - passed += stats.get_formats(0) == (_COMPRESSED,) - passed += stats.get_dimensions(0) == (I,) - # CHECK: Number of passed: 5 - print("Number of passed:", passed) - - -# CHECK-LABEL: test_binary_add_reduce_3d_1 -@testing_utils.run_test -def test_binary_add_reduce_3d_1(): - i, j, k, l = mlir_pytaco.get_index_vars(4) - I = 2 - J = 3 - K = 4 - L = 5 - A = mlir_pytaco.Tensor([I, J, K]) - B = mlir_pytaco.Tensor([I, J, L]) - C = mlir_pytaco.Tensor([K]) - D = mlir_pytaco.Tensor([L]) - E = mlir_pytaco.Tensor([I], _DENSE) - - _init_3d(A, I, J, K) - _init_3d(B, I, J, L) - _init_1d_with_value(C, K, 1) - _init_1d_with_value(D, L, 2) - - E[i] = A[i, j, k] * C[k] + B[i, j, l] * D[l] - indices, values = E.get_coordinates_and_values() - passed = np.array_equal(indices, [[0], [1]]) - passed += np.array_equal(values, [162.0, 204.0]) - - # The expression is implemented as: - # temp0[i, j] = A[i, j, k] * C[k] - # temp1[i, j] = B[i, j, l] * D[l] - # E[i] = temp0[i, j] + temp1[i, j] - # Check the two temporary tensors introduced by the implementation. 
- stats = E._stats - passed += stats.get_total() == 2 - passed += stats.get_formats(0) == (_COMPRESSED, _COMPRESSED) - passed += stats.get_dimensions(0) == (I, J) - passed += stats.get_formats(1) == (_COMPRESSED, _COMPRESSED) - passed += stats.get_dimensions(1) == (I, J) - # CHECK: Number of passed: 7 - print("Number of passed:", passed) - - -# CHECK-LABEL: test_binary_add_reduce_3d_2 -@testing_utils.run_test -def test_binary_add_reduce_3d_2(): - i, j, k, l = mlir_pytaco.get_index_vars(4) - I = 2 - J = 3 - K = 4 - L = 5 - A = mlir_pytaco.Tensor([I, J, K], [_COMPRESSED, _COMPRESSED, _DENSE]) - B = mlir_pytaco.Tensor([I, L, K], [_DENSE, _COMPRESSED, _COMPRESSED]) - C = mlir_pytaco.Tensor([J, K], [_COMPRESSED, _COMPRESSED]) - D = mlir_pytaco.Tensor([L]) - E = mlir_pytaco.Tensor([I], _DENSE) - - _init_3d(A, I, J, K) - _init_3d(B, I, L, K) - _init_2d(C, J, K) - _init_1d_with_value(D, L, 2) - - E[i] = A[i, j, k] + C[j, k] + B[i, l, k] * D[l] - indices, values = E.get_coordinates_and_values() - passed = np.array_equal(indices, [[0], [1]]) - passed += np.array_equal(values, [264.0, 316.0]) - - # The expression is implemented as: - # temp0[i, k] = A[i, j, k] + C[j, k] - # temp1[i, k] = B[i, l, k] * D[l] - # E[i] = temp0[i, k] + temp1[i, k] - # Check the two temporary tensors introduced by the implementation. - stats = E._stats - passed += stats.get_total() == 2 - passed += stats.get_formats(0) == (_COMPRESSED, _DENSE) - passed += stats.get_dimensions(0) == (I, K) - passed += stats.get_formats(1) == (_DENSE, _COMPRESSED) - passed += stats.get_dimensions(1) == (I, K) - # CHECK: Number of passed: 7 - print("Number of passed:", passed) diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/unit_test_tensor_io.py b/mlir/test/Integration/Dialect/SparseTensor/taco/unit_test_tensor_io.py deleted file mode 100644 index 1d5274759b6a9..0000000000000 --- a/mlir/test/Integration/Dialect/SparseTensor/taco/unit_test_tensor_io.py +++ /dev/null @@ -1,116 +0,0 @@ -# RUN: env SUPPORTLIB=%mlir_c_runner_utils %PYTHON %s | FileCheck %s - -from string import Template - -import numpy as np -import os -import sys -import tempfile - -_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__)) -sys.path.append(_SCRIPT_PATH) -from tools import mlir_pytaco -from tools import mlir_pytaco_io -from tools import mlir_pytaco_utils as pytaco_utils -from tools import testing_utils as testing_utils - - -# Define the aliases to shorten the code. -_COMPRESSED = mlir_pytaco.ModeFormat.COMPRESSED -_DENSE = mlir_pytaco.ModeFormat.DENSE - - -_FORMAT = mlir_pytaco.Format([_COMPRESSED, _COMPRESSED]) -_MTX_DATA = """%%MatrixMarket matrix coordinate real general -3 3 3 -3 1 3 -1 2 2 -3 2 4 -""" - - -# CHECK-LABEL: test_read_mtx_matrix_general -@testing_utils.run_test -def test_read_mtx_matrix_general(): - with tempfile.TemporaryDirectory() as test_dir: - file_name = os.path.join(test_dir, "data.mtx") - with open(file_name, "w") as file: - file.write(_MTX_DATA) - a = mlir_pytaco_io.read(file_name, _FORMAT) - passed = 0 - # The value of a is stored as an MLIR sparse tensor. 
- passed += not a.is_unpacked() - a.unpack() - passed += a.is_unpacked() - coords, values = a.get_coordinates_and_values() - passed += np.array_equal(coords, [[0, 1], [2, 0], [2, 1]]) - passed += np.allclose(values, [2.0, 3.0, 4.0]) - # CHECK: 4 - print(passed) - - -_TNS_DATA = """2 3 -3 2 -3 1 3 -1 2 2 -3 2 4 -""" - - -# CHECK-LABEL: test_read_tns -@testing_utils.run_test -def test_read_tns(): - with tempfile.TemporaryDirectory() as test_dir: - file_name = os.path.join(test_dir, "data.tns") - with open(file_name, "w") as file: - file.write(_TNS_DATA) - a = mlir_pytaco_io.read(file_name, _FORMAT) - passed = 0 - # The value of a is stored as an MLIR sparse tensor. - passed += not a.is_unpacked() - a.unpack() - passed += a.is_unpacked() - coords, values = a.get_coordinates_and_values() - passed += np.array_equal(coords, [[0, 1], [2, 0], [2, 1]]) - passed += np.allclose(values, [2.0, 3.0, 4.0]) - # CHECK: 4 - print(passed) - - -# CHECK-LABEL: test_write_unpacked_tns -@testing_utils.run_test -def test_write_unpacked_tns(): - a = mlir_pytaco.Tensor([2, 3]) - a.insert([0, 1], 10) - a.insert([1, 2], 40) - a.insert([0, 0], 20) - with tempfile.TemporaryDirectory() as test_dir: - file_name = os.path.join(test_dir, "data.tns") - try: - mlir_pytaco_io.write(file_name, a) - except ValueError as e: - # CHECK: Writing unpacked sparse tensors to file is not supported - print(e) - - -# CHECK-LABEL: test_write_packed_tns -@testing_utils.run_test -def test_write_packed_tns(): - a = mlir_pytaco.Tensor([2, 3]) - a.insert([0, 1], 10) - a.insert([1, 2], 40) - a.insert([0, 0], 20) - b = mlir_pytaco.Tensor([2, 3]) - i, j = mlir_pytaco.get_index_vars(2) - b[i, j] = a[i, j] + a[i, j] - with tempfile.TemporaryDirectory() as test_dir: - file_name = os.path.join(test_dir, "data.tns") - mlir_pytaco_io.write(file_name, b) - with open(file_name, "r") as file: - lines = file.readlines() - passed = 0 - # Skip the comment line in the output. - if lines[1:] == ["2 3\n", "2 3\n", "1 1 40\n", "1 2 20\n", "2 3 80\n"]: - passed = 1 - # CHECK: 1 - print(passed) diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/unit_test_tensor_utils.py b/mlir/test/Integration/Dialect/SparseTensor/taco/unit_test_tensor_utils.py deleted file mode 100644 index 1344f4aa741ab..0000000000000 --- a/mlir/test/Integration/Dialect/SparseTensor/taco/unit_test_tensor_utils.py +++ /dev/null @@ -1,135 +0,0 @@ -# RUN: env SUPPORTLIB=%mlir_c_runner_utils %PYTHON %s | FileCheck %s - -from typing import Sequence -import dataclasses -import numpy as np -import os -import sys -import tempfile - -from mlir.dialects import sparse_tensor - -_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__)) -sys.path.append(_SCRIPT_PATH) -from tools import mlir_pytaco -from tools import mlir_pytaco_utils as pytaco_utils - -# Define the aliases to shorten the code. -_COMPRESSED = mlir_pytaco.ModeFormat.COMPRESSED -_DENSE = mlir_pytaco.ModeFormat.DENSE - - -def _to_string(s: Sequence[int]) -> str: - """Converts a sequence of integer to a space separated value string.""" - return " ".join(map(lambda e: str(e), s)) - - -def _add_one(s: Sequence[int]) -> Sequence[int]: - """Adds one to each element in the sequence of integer.""" - return [i + 1 for i in s] - - -@dataclasses.dataclass(frozen=True) -class _SparseTensorCOO: - """Values for a COO-flavored format sparse tensor. - - Attributes: - rank: An integer rank for the tensor. - nse: An integer for the number of non-zero values. - shape: A sequence of integer for the dimension size. 
- values: A sequence of float for the non-zero values of the tensor. - indices: A sequence of coordinate, each coordinate is a sequence of integer. - """ - - rank: int - nse: int - shape: Sequence[int] - values: Sequence[float] - indices: Sequence[Sequence[int]] - - -def _coo_values_to_tns_format(t: _SparseTensorCOO) -> str: - """Converts a sparse tensor COO-flavored values to TNS text format.""" - # The coo_value_str contains one line for each (coordinate value) pair. - # Indices are 1-based in TNS text format but 0-based in MLIR. - coo_value_str = "\n".join( - map( - lambda i: _to_string(_add_one(t.indices[i])) + " " + str(t.values[i]), - range(t.nse), - ) - ) - - # Returns the TNS text format representation for the tensor. - return f"""{t.rank} {t.nse} -{_to_string(t.shape)} -{coo_value_str} -""" - - -def _implement_read_tns_test( - t: _SparseTensorCOO, sparsity_codes: Sequence[sparse_tensor.DimLevelType] -) -> int: - tns_data = _coo_values_to_tns_format(t) - - # Write sparse tensor data to a file. - with tempfile.TemporaryDirectory() as test_dir: - file_name = os.path.join(test_dir, "data.tns") - with open(file_name, "w") as file: - file.write(tns_data) - - # Read the data from the file and construct an MLIR sparse tensor. - sparse_tensor, o_shape = pytaco_utils.create_sparse_tensor( - file_name, sparsity_codes, "f64" - ) - - passed = 0 - - # Verify the output shape for the tensor. - if np.array_equal(o_shape, t.shape): - passed += 1 - - # Use the output MLIR sparse tensor pointer to retrieve the COO-flavored - # values and verify the values. - ( - o_rank, - o_nse, - o_shape, - o_values, - o_indices, - ) = pytaco_utils.sparse_tensor_to_coo_tensor(sparse_tensor, np.float64) - if ( - o_rank == t.rank - and o_nse == t.nse - and np.array_equal(o_shape, t.shape) - and np.allclose(o_values, t.values) - and np.array_equal(o_indices, t.indices) - ): - passed += 1 - - return passed - - -# A 2D sparse tensor data in COO-flavored format. -_rank = 2 -_nse = 3 -_shape = [4, 5] -_values = [3.0, 2.0, 4.0] -_indices = [[0, 4], [1, 0], [3, 1]] - -_t = _SparseTensorCOO(_rank, _nse, _shape, _values, _indices) -_s = [_COMPRESSED, _COMPRESSED] -# CHECK: PASSED 2D: 2 -print("PASSED 2D: ", _implement_read_tns_test(_t, _s)) - - -# A 3D sparse tensor data in COO-flavored format. -_rank = 3 -_nse = 3 -_shape = [2, 5, 4] -_values = [3.0, 2.0, 4.0] -_indices = [[0, 4, 3], [1, 3, 0], [1, 3, 1]] - -_t = _SparseTensorCOO(_rank, _nse, _shape, _values, _indices) -_s = [_DENSE, _COMPRESSED, _COMPRESSED] -# CHECK: PASSED 3D: 2 -print("PASSED 3D: ", _implement_read_tns_test(_t, _s)) From d34f5dd07a2197ec91ecc33307eed6435ceda73f Mon Sep 17 00:00:00 2001 From: Peter Klausler <35819229+klausler@users.noreply.github.com> Date: Wed, 13 Sep 2023 15:55:40 -0700 Subject: [PATCH 13/22] [flang] Accept ALLOCATED(ARRAY=assumedRank) (#66233) The definitions of the ALLOCATED intrinsic in the intrinsics table did not allow for an assumed-rank array. 
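
For illustration (subroutine and dummy names are placeholders), the newly
accepted usage mirrors the test added below:

  subroutine demo(a)
    real, allocatable :: a(..)      ! assumed-rank allocatable dummy
    print *, allocated(a)           ! accepted with this change
    select rank (a)
    rank (0)
      print *, allocated(scalar=a)  ! SCALAR= keyword in the rank-0 branch
    rank default
      print *, allocated(array=a)   ! ARRAY= keyword for any other rank
    end select
  end subroutine

The ARRAY= entry in the intrinsic table now accepts any rank, including
assumed rank, while the SCALAR= entry still requires a scalar.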
--- flang/lib/Evaluate/intrinsics.cpp | 4 ++-- flang/test/Semantics/allocated.f90 | 11 ++++++++++- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/flang/lib/Evaluate/intrinsics.cpp b/flang/lib/Evaluate/intrinsics.cpp index 030e5b2fd2c6d..7213482d9d798 100644 --- a/flang/lib/Evaluate/intrinsics.cpp +++ b/flang/lib/Evaluate/intrinsics.cpp @@ -315,10 +315,10 @@ static const IntrinsicInterface genericIntrinsicFunction[]{ {"aint", {{"a", SameReal}, MatchingDefaultKIND}, KINDReal}, {"all", {{"mask", SameLogical, Rank::array}, OptionalDIM}, SameLogical, Rank::dimReduced, IntrinsicClass::transformationalFunction}, - {"allocated", {{"array", AnyData, Rank::array}}, DefaultLogical, - Rank::elemental, IntrinsicClass::inquiryFunction}, {"allocated", {{"scalar", AnyData, Rank::scalar}}, DefaultLogical, Rank::elemental, IntrinsicClass::inquiryFunction}, + {"allocated", {{"array", AnyData, Rank::anyOrAssumedRank}}, DefaultLogical, + Rank::elemental, IntrinsicClass::inquiryFunction}, {"anint", {{"a", SameReal}, MatchingDefaultKIND}, KINDReal}, {"any", {{"mask", SameLogical, Rank::array}, OptionalDIM}, SameLogical, Rank::dimReduced, IntrinsicClass::transformationalFunction}, diff --git a/flang/test/Semantics/allocated.f90 b/flang/test/Semantics/allocated.f90 index 82ce7ca7bdb9f..a2eddaf82784a 100644 --- a/flang/test/Semantics/allocated.f90 +++ b/flang/test/Semantics/allocated.f90 @@ -1,6 +1,7 @@ ! RUN: %python %S/test_errors.py %s %flang_fc1 ! Tests for the ALLOCATED() intrinsic -subroutine alloc(coarray_alloc, coarray_not_alloc, t2_not_alloc) +subroutine alloc(coarray_alloc, coarray_not_alloc, t2_not_alloc, & + assumedRank) interface function return_allocatable() @@ -30,6 +31,7 @@ function return_allocatable() real :: coarray_not_alloc(:)[*] type(t2) :: t2_not_alloc + real, allocatable :: assumedRank(..) ! OK @@ -42,6 +44,13 @@ function return_allocatable() print *, allocated(coarray_alloc[2,3]) print *, allocated(t2_not_alloc%coarray_alloc) print *, allocated(t2_not_alloc%coarray_alloc[2]) + print *, allocated(assumedRank) + select rank (assumedRank) + rank (0) + print *, allocated(scalar=assumedRank) + rank default + print *, allocated(array=assumedRank) + end select !ERROR: Argument of ALLOCATED() must be an ALLOCATABLE object or component print *, allocated(not_alloc) From e0c54a2e7b9ca20bbf3fb554162bfcd6d5f56a1d Mon Sep 17 00:00:00 2001 From: "Kazushi (Jam) Marukawa" Date: Mon, 28 Aug 2023 21:01:57 +0900 Subject: [PATCH 14/22] [libc++abi][VE] Support VE in long double demangler Support VE in long double demangler. This patch corrects libcxxabi/test/test_demangle.pass.cpp on VE. 
Reviewed By: MaskRay, #libc_abi, ldionne Differential Revision: https://reviews.llvm.org/D159004 --- libcxxabi/src/demangle/ItaniumDemangle.h | 3 ++- llvm/include/llvm/Demangle/ItaniumDemangle.h | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/libcxxabi/src/demangle/ItaniumDemangle.h b/libcxxabi/src/demangle/ItaniumDemangle.h index c80f343cc876e..e96fee18b27a3 100644 --- a/libcxxabi/src/demangle/ItaniumDemangle.h +++ b/libcxxabi/src/demangle/ItaniumDemangle.h @@ -5134,7 +5134,8 @@ template <> struct FloatData { #if defined(__mips__) && defined(__mips_n64) || defined(__aarch64__) || \ - defined(__wasm__) || defined(__riscv) || defined(__loongarch__) + defined(__wasm__) || defined(__riscv) || defined(__loongarch__) || \ + defined(__ve__) static const size_t mangled_size = 32; #elif defined(__arm__) || defined(__mips__) || defined(__hexagon__) static const size_t mangled_size = 16; diff --git a/llvm/include/llvm/Demangle/ItaniumDemangle.h b/llvm/include/llvm/Demangle/ItaniumDemangle.h index 26acd38f8ae84..04faeb462e097 100644 --- a/llvm/include/llvm/Demangle/ItaniumDemangle.h +++ b/llvm/include/llvm/Demangle/ItaniumDemangle.h @@ -5129,7 +5129,8 @@ template <> struct FloatData { #if defined(__mips__) && defined(__mips_n64) || defined(__aarch64__) || \ - defined(__wasm__) || defined(__riscv) || defined(__loongarch__) + defined(__wasm__) || defined(__riscv) || defined(__loongarch__) || \ + defined(__ve__) static const size_t mangled_size = 32; #elif defined(__arm__) || defined(__mips__) || defined(__hexagon__) static const size_t mangled_size = 16; From 9a220dc6abd6ceeef3ca741b7d9a281bf05dbfd2 Mon Sep 17 00:00:00 2001 From: "Kazushi (Jam) Marukawa" Date: Fri, 25 Aug 2023 15:49:38 +0200 Subject: [PATCH 15/22] [libc++abi][test][VE] Add UNSUPPORTED to pass tests on VE Add UNSUPPORTED to pass tests on VE. VE uses SjLj libunwind, so _Unwind_Backtrace and _Unwind_ForcedUnwind are not implemented. Reviewed By: MaskRay, #libc_abi, ldionne Differential Revision: https://reviews.llvm.org/D159446 --- libcxxabi/test/backtrace_test.pass.cpp | 3 +++ libcxxabi/test/forced_unwind1.pass.cpp | 3 +++ libcxxabi/test/forced_unwind2.pass.cpp | 3 +++ libcxxabi/test/forced_unwind3.pass.cpp | 3 +++ 4 files changed, 12 insertions(+) diff --git a/libcxxabi/test/backtrace_test.pass.cpp b/libcxxabi/test/backtrace_test.pass.cpp index b95ce4f33be8e..1953429a469bd 100644 --- a/libcxxabi/test/backtrace_test.pass.cpp +++ b/libcxxabi/test/backtrace_test.pass.cpp @@ -8,6 +8,9 @@ // UNSUPPORTED: no-exceptions +// VE only supports SjLj and doesn't provide _Unwind_Backtrace. +// UNSUPPORTED: target={{ve-.*}} + #include #include #include diff --git a/libcxxabi/test/forced_unwind1.pass.cpp b/libcxxabi/test/forced_unwind1.pass.cpp index 1788ab5b46f10..1e94eb971be08 100644 --- a/libcxxabi/test/forced_unwind1.pass.cpp +++ b/libcxxabi/test/forced_unwind1.pass.cpp @@ -11,6 +11,9 @@ // UNSUPPORTED: no-exceptions, c++03 +// VE only supports SjLj and doesn't provide _Unwind_ForcedUnwind. +// UNSUPPORTED: target={{ve-.*}} + // These tests fail on previously released dylibs, investigation needed. 
// XFAIL: stdlib=apple-libc++ && target={{.+}}-apple-macosx10.{{9|10|11|12|13|14|15}} // XFAIL: stdlib=apple-libc++ && target={{.+}}-apple-macosx{{11.0|12.0}} diff --git a/libcxxabi/test/forced_unwind2.pass.cpp b/libcxxabi/test/forced_unwind2.pass.cpp index 15923afe1165c..65f5d5dd2e05c 100644 --- a/libcxxabi/test/forced_unwind2.pass.cpp +++ b/libcxxabi/test/forced_unwind2.pass.cpp @@ -10,6 +10,9 @@ // UNSUPPORTED: no-exceptions, c++03 +// VE only supports SjLj and doesn't provide _Unwind_ForcedUnwind. +// UNSUPPORTED: target={{ve-.*}} + // These tests fail on previously released dylibs, investigation needed. // XFAIL: stdlib=apple-libc++ && target={{.+}}-apple-macosx10.{{9|10|11|12|13|14|15}} // XFAIL: stdlib=apple-libc++ && target={{.+}}-apple-macosx{{11.0|12.0}} diff --git a/libcxxabi/test/forced_unwind3.pass.cpp b/libcxxabi/test/forced_unwind3.pass.cpp index 91bb74caf4098..e852ab592292f 100644 --- a/libcxxabi/test/forced_unwind3.pass.cpp +++ b/libcxxabi/test/forced_unwind3.pass.cpp @@ -13,6 +13,9 @@ // UNSUPPORTED: no-threads // UNSUPPORTED: no-exceptions +// VE only supports SjLj and doesn't provide _Unwind_ForcedUnwind. +// UNSUPPORTED: target={{ve-.*}} + #include #include #include From 4fed5959974e4a85504667ce47ef03234dd9aec6 Mon Sep 17 00:00:00 2001 From: Peter Klausler <35819229+klausler@users.noreply.github.com> Date: Wed, 13 Sep 2023 16:13:31 -0700 Subject: [PATCH 16/22] [flang] Correct semantic representation & handling of RANK(*) (#66234) A RANK(*) case in a SELECT RANK construct selects the case of an assumed-rank dummy argument whose effective actual argument is an assumed-size array. In this case, the attributes of the selector are those of a rank-1 assumed-size array, and the selector cannot be allocatable or a pointer. Ensure that the representation of a SELECT RANK construct's per-case AssocEntityDetails can distinguish RANK(n), RANK(*), and RANK DEFAULT, and clean up various code sites and tests where the distinctions matter. --- flang/include/flang/Evaluate/tools.h | 5 +-- flang/include/flang/Semantics/symbol.h | 39 ++++++++++++++++++----- flang/include/flang/Semantics/tools.h | 9 ++++-- flang/lib/Evaluate/shape.cpp | 35 ++++++++++++-------- flang/lib/Evaluate/tools.cpp | 23 ++++--------- flang/lib/Semantics/check-allocate.cpp | 39 ++++++++++++----------- flang/lib/Semantics/check-select-rank.cpp | 2 +- flang/lib/Semantics/expression.cpp | 6 ++-- flang/lib/Semantics/resolve-names.cpp | 31 +++++++++++++----- flang/lib/Semantics/symbol.cpp | 10 ++++-- flang/test/Semantics/misc-intrinsics.f90 | 21 ++++++++++++ flang/test/Semantics/select-rank.f90 | 8 ++--- flang/test/Semantics/select-rank03.f90 | 17 ++++++---- 13 files changed, 160 insertions(+), 85 deletions(-) diff --git a/flang/include/flang/Evaluate/tools.h b/flang/include/flang/Evaluate/tools.h index b3f8f4a67a7b5..71fe1237efdde 100644 --- a/flang/include/flang/Evaluate/tools.h +++ b/flang/include/flang/Evaluate/tools.h @@ -1224,10 +1224,11 @@ bool IsEventTypeOrLockType(const DerivedTypeSpec *); // of the construct entity. // (E.g., for ASSOCIATE(x => y%z), ResolveAssociations(x) returns x, // while GetAssociationRoot(x) returns y.) -// ResolveAssociationsExceptSelectRank() stops at a RANK case symbol. +// In a SELECT RANK construct, ResolveAssociations() stops at a +// RANK(n) or RANK(*) case symbol, but traverses the selector for +// RANK DEFAULT. 
const Symbol &ResolveAssociations(const Symbol &); const Symbol &GetAssociationRoot(const Symbol &); -const Symbol &ResolveAssociationsExceptSelectRank(const Symbol &); const Symbol *FindCommonBlockContaining(const Symbol &); int CountLenParameters(const DerivedTypeSpec &); diff --git a/flang/include/flang/Semantics/symbol.h b/flang/include/flang/Semantics/symbol.h index aada3bf94cc12..a5f4ad76c26b7 100644 --- a/flang/include/flang/Semantics/symbol.h +++ b/flang/include/flang/Semantics/symbol.h @@ -278,12 +278,33 @@ class AssocEntityDetails : public EntityDetails { AssocEntityDetails &operator=(const AssocEntityDetails &) = default; AssocEntityDetails &operator=(AssocEntityDetails &&) = default; const MaybeExpr &expr() const { return expr_; } + + // SELECT RANK's rank cases will return a populated result for + // RANK(n) and RANK(*), and IsAssumedRank() will be true for + // RANK DEFAULT. + std::optional rank() const { + int r{rank_.value_or(0)}; + if (r == isAssumedSize) { + return 1; // RANK(*) + } else if (r == isAssumedRank) { + return std::nullopt; // RANK DEFAULT + } else { + return rank_; + } + } + bool IsAssumedSize() const { return rank_.value_or(0) == isAssumedSize; } + bool IsAssumedRank() const { return rank_.value_or(0) == isAssumedRank; } void set_rank(int rank); - std::optional rank() const { return rank_; } + void set_IsAssumedSize(); + void set_IsAssumedRank(); private: MaybeExpr expr_; - std::optional rank_; // for SELECT RANK + // Populated for SELECT RANK with rank (n>=0) for RANK(n), + // isAssumedSize for RANK(*), or isAssumedRank for RANK DEFAULT. + static constexpr int isAssumedSize{-1}; // RANK(*) + static constexpr int isAssumedRank{-2}; // RANK DEFAULT + std::optional rank_; }; llvm::raw_ostream &operator<<(llvm::raw_ostream &, const AssocEntityDetails &); @@ -862,12 +883,14 @@ class Symbol { return iface ? 
iface->RankImpl(depth) : 0; }, [](const AssocEntityDetails &aed) { - if (const auto &expr{aed.expr()}) { - if (auto assocRank{aed.rank()}) { - return *assocRank; - } else { - return expr->Rank(); - } + if (auto assocRank{aed.rank()}) { + // RANK(n) & RANK(*) + return *assocRank; + } else if (aed.IsAssumedRank()) { + // RANK DEFAULT + return 0; + } else if (const auto &expr{aed.expr()}) { + return expr->Rank(); } else { return 0; } diff --git a/flang/include/flang/Semantics/tools.h b/flang/include/flang/Semantics/tools.h index 5bcb96e6050fa..12649da6adbe2 100644 --- a/flang/include/flang/Semantics/tools.h +++ b/flang/include/flang/Semantics/tools.h @@ -179,8 +179,13 @@ const Symbol *IsFinalizable(const DerivedTypeSpec &, const Symbol *HasImpureFinal(const Symbol &); bool IsInBlankCommon(const Symbol &); inline bool IsAssumedSizeArray(const Symbol &symbol) { - const auto *details{symbol.detailsIf()}; - return details && details->IsAssumedSize(); + if (const auto *object{symbol.detailsIf()}) { + return object->IsAssumedSize(); + } else if (const auto *assoc{symbol.detailsIf()}) { + return assoc->IsAssumedSize(); + } else { + return false; + } } bool IsAssumedLengthCharacter(const Symbol &); bool IsExternal(const Symbol &); diff --git a/flang/lib/Evaluate/shape.cpp b/flang/lib/Evaluate/shape.cpp index 8f4923ff96a94..e26479cc1f055 100644 --- a/flang/lib/Evaluate/shape.cpp +++ b/flang/lib/Evaluate/shape.cpp @@ -248,17 +248,17 @@ class GetLowerBoundHelper Result GetLowerBound(const Symbol &symbol0, NamedEntity &&base) const { const Symbol &symbol{symbol0.GetUltimate()}; - if (const auto *details{ + if (const auto *object{ symbol.detailsIf()}) { - int rank{details->shape().Rank()}; + int rank{object->shape().Rank()}; if (dimension_ < rank) { - const semantics::ShapeSpec &shapeSpec{details->shape()[dimension_]}; + const semantics::ShapeSpec &shapeSpec{object->shape()[dimension_]}; if (shapeSpec.lbound().isExplicit()) { if (const auto &lbound{shapeSpec.lbound().GetExplicit()}) { if constexpr (LBOUND_SEMANTICS) { bool ok{false}; auto lbValue{ToInt64(*lbound)}; - if (dimension_ == rank - 1 && details->IsAssumedSize()) { + if (dimension_ == rank - 1 && object->IsAssumedSize()) { // last dimension of assumed-size dummy array: don't worry // about handling an empty dimension ok = !invariantOnly_ || IsScopeInvariantExpr(*lbound); @@ -309,7 +309,10 @@ class GetLowerBoundHelper } } else if (const auto *assoc{ symbol.detailsIf()}) { - if (assoc->rank()) { // SELECT RANK case + if (assoc->IsAssumedSize()) { // RANK(*) + return Result{1}; + } else if (assoc->IsAssumedRank()) { // RANK DEFAULT + } else if (assoc->rank()) { // RANK(n) const Symbol &resolved{ResolveAssociations(symbol)}; if (IsDescriptor(resolved) && dimension_ < *assoc->rank()) { return ExtentExpr{DescriptorInquiry{std::move(base), @@ -497,9 +500,11 @@ MaybeExtentExpr GetExtent( const NamedEntity &base, int dimension, bool invariantOnly) { CHECK(dimension >= 0); const Symbol &last{base.GetLastSymbol()}; - const Symbol &symbol{ResolveAssociationsExceptSelectRank(last)}; + const Symbol &symbol{ResolveAssociations(last)}; if (const auto *assoc{last.detailsIf()}) { - if (assoc->rank()) { // SELECT RANK case + if (assoc->IsAssumedSize() || assoc->IsAssumedRank()) { // RANK(*)/DEFAULT + return std::nullopt; + } else if (assoc->rank()) { // RANK(n) if (semantics::IsDescriptor(symbol) && dimension < *assoc->rank()) { return ExtentExpr{DescriptorInquiry{ NamedEntity{base}, DescriptorInquiry::Field::Extent, dimension}}; @@ -595,8 +600,7 @@ MaybeExtentExpr 
ComputeUpperBound( MaybeExtentExpr GetRawUpperBound( const NamedEntity &base, int dimension, bool invariantOnly) { - const Symbol &symbol{ - ResolveAssociationsExceptSelectRank(base.GetLastSymbol())}; + const Symbol &symbol{ResolveAssociations(base.GetLastSymbol())}; if (const auto *details{symbol.detailsIf()}) { int rank{details->shape().Rank()}; if (dimension < rank) { @@ -612,7 +616,11 @@ MaybeExtentExpr GetRawUpperBound( } } else if (const auto *assoc{ symbol.detailsIf()}) { - if (auto extent{GetAssociatedExtent(base, *assoc, dimension)}) { + if (assoc->IsAssumedSize() || assoc->IsAssumedRank()) { + return std::nullopt; + } else if (assoc->rank() && dimension >= *assoc->rank()) { + return std::nullopt; + } else if (auto extent{GetAssociatedExtent(base, *assoc, dimension)}) { return ComputeUpperBound( GetRawLowerBound(base, dimension), std::move(extent)); } @@ -645,8 +653,7 @@ static MaybeExtentExpr GetExplicitUBOUND(FoldingContext *context, static MaybeExtentExpr GetUBOUND(FoldingContext *context, const NamedEntity &base, int dimension, bool invariantOnly) { - const Symbol &symbol{ - ResolveAssociationsExceptSelectRank(base.GetLastSymbol())}; + const Symbol &symbol{ResolveAssociations(base.GetLastSymbol())}; if (const auto *details{symbol.detailsIf()}) { int rank{details->shape().Rank()}; if (dimension < rank) { @@ -662,7 +669,9 @@ static MaybeExtentExpr GetUBOUND(FoldingContext *context, } } else if (const auto *assoc{ symbol.detailsIf()}) { - if (assoc->rank()) { // SELECT RANK case + if (assoc->IsAssumedSize() || assoc->IsAssumedRank()) { + return std::nullopt; + } else if (assoc->rank()) { // RANK (n) const Symbol &resolved{ResolveAssociations(symbol)}; if (IsDescriptor(resolved) && dimension < *assoc->rank()) { ExtentExpr lb{DescriptorInquiry{NamedEntity{base}, diff --git a/flang/lib/Evaluate/tools.cpp b/flang/lib/Evaluate/tools.cpp index d2fa5c9b5f36b..aadbc0804b342 100644 --- a/flang/lib/Evaluate/tools.cpp +++ b/flang/lib/Evaluate/tools.cpp @@ -702,15 +702,14 @@ std::optional> ConvertToType( bool IsAssumedRank(const Symbol &original) { if (const auto *assoc{original.detailsIf()}) { if (assoc->rank()) { - return false; // in SELECT RANK case + return false; // in RANK(n) or RANK(*) + } else if (assoc->IsAssumedRank()) { + return true; // RANK DEFAULT } } const Symbol &symbol{semantics::ResolveAssociations(original)}; - if (const auto *details{symbol.detailsIf()}) { - return details->IsAssumedRank(); - } else { - return false; - } + const auto *object{symbol.detailsIf()}; + return object && object->IsAssumedRank(); } bool IsAssumedRank(const ActualArgument &arg) { @@ -1209,17 +1208,7 @@ namespace Fortran::semantics { const Symbol &ResolveAssociations(const Symbol &original) { const Symbol &symbol{original.GetUltimate()}; if (const auto *details{symbol.detailsIf()}) { - if (const Symbol * nested{UnwrapWholeSymbolDataRef(details->expr())}) { - return ResolveAssociations(*nested); - } - } - return symbol; -} - -const Symbol &ResolveAssociationsExceptSelectRank(const Symbol &original) { - const Symbol &symbol{original.GetUltimate()}; - if (const auto *details{symbol.detailsIf()}) { - if (!details->rank()) { + if (!details->rank()) { // Not RANK(n) or RANK(*) if (const Symbol * nested{UnwrapWholeSymbolDataRef(details->expr())}) { return ResolveAssociations(*nested); } diff --git a/flang/lib/Semantics/check-allocate.cpp b/flang/lib/Semantics/check-allocate.cpp index 12d795290d927..2edb8e59fd084 100644 --- a/flang/lib/Semantics/check-allocate.cpp +++ 
b/flang/lib/Semantics/check-allocate.cpp @@ -39,16 +39,11 @@ class AllocationCheckerHelper { public: AllocationCheckerHelper( const parser::Allocation &alloc, AllocateCheckerInfo &info) - : allocateInfo_{info}, - allocateObject_{std::get(alloc.t)}, - name_{parser::GetLastName(allocateObject_)}, - original_{name_.symbol ? &name_.symbol->GetUltimate() : nullptr}, - symbol_{original_ ? &ResolveAssociations(*original_) : nullptr}, - type_{symbol_ ? symbol_->GetType() : nullptr}, - allocateShapeSpecRank_{ShapeSpecRank(alloc)}, - rank_{original_ ? original_->Rank() : 0}, - allocateCoarraySpecRank_{CoarraySpecRank(alloc)}, - corank_{symbol_ ? symbol_->Corank() : 0} {} + : allocateInfo_{info}, allocateObject_{std::get( + alloc.t)}, + allocateShapeSpecRank_{ShapeSpecRank(alloc)}, allocateCoarraySpecRank_{ + CoarraySpecRank( + alloc)} {} bool RunChecks(SemanticsContext &context); @@ -90,14 +85,17 @@ class AllocationCheckerHelper { AllocateCheckerInfo &allocateInfo_; const parser::AllocateObject &allocateObject_; - const parser::Name &name_; - const Symbol *original_{nullptr}; // no USE or host association - const Symbol *symbol_{nullptr}; // no USE, host, or construct association - const DeclTypeSpec *type_{nullptr}; - const int allocateShapeSpecRank_; - const int rank_{0}; - const int allocateCoarraySpecRank_; - const int corank_{0}; + const int allocateShapeSpecRank_{0}; + const int allocateCoarraySpecRank_{0}; + const parser::Name &name_{parser::GetLastName(allocateObject_)}; + // no USE or host association + const Symbol *original_{ + name_.symbol ? &name_.symbol->GetUltimate() : nullptr}; + // no USE, host, or construct association + const Symbol *symbol_{original_ ? &ResolveAssociations(*original_) : nullptr}; + const DeclTypeSpec *type_{symbol_ ? symbol_->GetType() : nullptr}; + const int rank_{original_ ? original_->Rank() : 0}; + const int corank_{symbol_ ? 
symbol_->Corank() : 0}; bool hasDeferredTypeParameter_{false}; bool isUnlimitedPolymorphic_{false}; bool isAbstract_{false}; @@ -539,6 +537,11 @@ bool AllocationCheckerHelper::RunChecks(SemanticsContext &context) { } } // Shape related checks + if (symbol_ && evaluate::IsAssumedRank(*symbol_)) { + context.Say(name_.source, + "An assumed-rank object may not appear in an ALLOCATE statement"_err_en_US); + return false; + } if (rank_ > 0) { if (!hasAllocateShapeSpecList()) { // C939 diff --git a/flang/lib/Semantics/check-select-rank.cpp b/flang/lib/Semantics/check-select-rank.cpp index 424f9b45d64cd..2e602d307013c 100644 --- a/flang/lib/Semantics/check-select-rank.cpp +++ b/flang/lib/Semantics/check-select-rank.cpp @@ -87,7 +87,7 @@ void SelectRankConstructChecker::Leave( } if (saveSelSymbol && IsAllocatableOrPointer(*saveSelSymbol)) { // F'2023 C1160 - context_.Say(parser::FindSourceLocation(selectRankStmtSel), + context_.Say(rankCaseStmt.source, "RANK (*) cannot be used when selector is " "POINTER or ALLOCATABLE"_err_en_US); } diff --git a/flang/lib/Semantics/expression.cpp b/flang/lib/Semantics/expression.cpp index d690f3da6820d..4ccb2c3ef5d01 100644 --- a/flang/lib/Semantics/expression.cpp +++ b/flang/lib/Semantics/expression.cpp @@ -260,11 +260,11 @@ MaybeExpr ExpressionAnalyzer::CompleteSubscripts(ArrayRef &&ref) { symbolRank, symbol.name(), subscripts); } return std::nullopt; - } else if (const auto *object{ - symbol.detailsIf()}) { + } else if (symbol.has() || + symbol.has()) { // C928 & C1002 if (Triplet *last{std::get_if(&ref.subscript().back().u)}) { - if (!last->upper() && object->IsAssumedSize()) { + if (!last->upper() && IsAssumedSizeArray(symbol)) { Say("Assumed-size array '%s' must have explicit final " "subscript upper bound value"_err_en_US, symbol.name()); diff --git a/flang/lib/Semantics/resolve-names.cpp b/flang/lib/Semantics/resolve-names.cpp index 29cd107186fb5..126c035ef57fa 100644 --- a/flang/lib/Semantics/resolve-names.cpp +++ b/flang/lib/Semantics/resolve-names.cpp @@ -6942,17 +6942,32 @@ void ConstructVisitor::Post(const parser::TypeGuardStmt::Guard &x) { void ConstructVisitor::Post(const parser::SelectRankCaseStmt::Rank &x) { if (auto *symbol{MakeAssocEntity()}) { SetTypeFromAssociation(*symbol); + auto &details{symbol->get()}; // Don't call SetAttrsFromAssociation() for SELECT RANK. 
- symbol->attrs() |= - evaluate::GetAttrs(GetCurrentAssociation().selector.expr) & - Attrs{Attr::ALLOCATABLE, Attr::ASYNCHRONOUS, Attr::POINTER, - Attr::TARGET, Attr::VOLATILE}; - if (const auto *init{std::get_if(&x.u)}) { - if (auto val{EvaluateInt64(context(), *init)}) { - auto &details{symbol->get()}; - details.set_rank(*val); + Attrs selectorAttrs{ + evaluate::GetAttrs(GetCurrentAssociation().selector.expr)}; + Attrs attrsToKeep{Attr::ASYNCHRONOUS, Attr::TARGET, Attr::VOLATILE}; + if (const auto *rankValue{ + std::get_if(&x.u)}) { + // RANK(n) + if (auto expr{EvaluateIntExpr(*rankValue)}) { + if (auto val{evaluate::ToInt64(*expr)}) { + details.set_rank(*val); + attrsToKeep |= Attrs{Attr::ALLOCATABLE, Attr::POINTER}; + } else { + Say("RANK() expression must be constant"_err_en_US); + } } + } else if (std::holds_alternative(x.u)) { + // RANK(*): assumed-size + details.set_IsAssumedSize(); + } else { + CHECK(std::holds_alternative(x.u)); + // RANK DEFAULT: assumed-rank + details.set_IsAssumedRank(); + attrsToKeep |= Attrs{Attr::ALLOCATABLE, Attr::POINTER}; } + symbol->attrs() |= selectorAttrs & attrsToKeep; } } diff --git a/flang/lib/Semantics/symbol.cpp b/flang/lib/Semantics/symbol.cpp index 2e14b2e8a1955..f4edc8a08fe69 100644 --- a/flang/lib/Semantics/symbol.cpp +++ b/flang/lib/Semantics/symbol.cpp @@ -153,6 +153,8 @@ void EntityDetails::set_type(const DeclTypeSpec &type) { } void AssocEntityDetails::set_rank(int rank) { rank_ = rank; } +void AssocEntityDetails::set_IsAssumedSize() { rank_ = isAssumedSize; } +void AssocEntityDetails::set_IsAssumedRank() { rank_ = isAssumedRank; } void EntityDetails::ReplaceType(const DeclTypeSpec &type) { type_ = &type; } ObjectEntityDetails::ObjectEntityDetails(EntityDetails &&d) @@ -438,8 +440,12 @@ llvm::raw_ostream &operator<<( llvm::raw_ostream &operator<<( llvm::raw_ostream &os, const AssocEntityDetails &x) { os << *static_cast(&x); - if (auto assocRank{x.rank()}) { - os << " rank: " << *assocRank; + if (x.IsAssumedSize()) { + os << " RANK(*)"; + } else if (x.IsAssumedRank()) { + os << " RANK DEFAULT"; + } else if (auto assocRank{x.rank()}) { + os << " RANK(" << *assocRank << ')'; } DumpExpr(os, "expr", x.expr()); return os; diff --git a/flang/test/Semantics/misc-intrinsics.f90 b/flang/test/Semantics/misc-intrinsics.f90 index c8f6529970ca0..195906eef9d79 100644 --- a/flang/test/Semantics/misc-intrinsics.f90 +++ b/flang/test/Semantics/misc-intrinsics.f90 @@ -42,6 +42,25 @@ subroutine test(arg, assumedRank) print *, lbound(assumedRank, dim=2) !ERROR: DIM=2 dimension is out of range for rank-1 array print *, ubound(assumedRank, dim=2) + rank(*) + !ERROR: A dim= argument is required for 'size' when the array is assumed-size + print *, size(assumedRank) + !ERROR: missing mandatory 'dim=' argument + print *, ubound(assumedRank) + !ERROR: The 'source=' argument to the intrinsic function 'shape' may not be assumed-size + print *, shape(assumedRank) + !ERROR: The 'harvest=' argument to the intrinsic procedure 'random_number' may not be assumed-size + call random_number(assumedRank) + !ERROR: DIM=0 dimension must be positive + print *, lbound(assumedRank, 0) + !ERROR: DIM=0 dimension must be positive + print *, ubound(assumedRank, 0) + !ERROR: DIM=1 dimension is out of range for rank-1 assumed-size array + print *, ubound(assumedRank, 1) + !ERROR: DIM=2 dimension is out of range for rank-1 array + print *, lbound(assumedRank, dim=2) + !ERROR: DIM=2 dimension is out of range for rank-1 array + print *, ubound(assumedRank, dim=2) end select ! 
But these cases are fine: print *, size(arg, dim=1) @@ -60,6 +79,8 @@ subroutine test(arg, assumedRank) rank(3) print *, lbound(assumedRank, dim=2) print *, ubound(assumedRank, dim=2) + rank(*) + print *, lbound(assumedRank, dim=1) rank default print *, lbound(assumedRank, dim=2) print *, ubound(assumedRank, dim=2) diff --git a/flang/test/Semantics/select-rank.f90 b/flang/test/Semantics/select-rank.f90 index 0dc915a99914a..fa8d2fc4d461d 100644 --- a/flang/test/Semantics/select-rank.f90 +++ b/flang/test/Semantics/select-rank.f90 @@ -109,7 +109,8 @@ subroutine CALL_ME6(x) j = INT(0, KIND=MERGE(KIND(0), -1, RANK(x) == 3)) !ERROR: The value of the selector must be between zero and 15 RANK(-1) - print *, "rank: -ve" + print *, "rank: negative" + !ERROR: 'kind=' argument must be a constant scalar integer whose value is a supported kind for the intrinsic result type j = INT(0, KIND=MERGE(KIND(0), -1, RANK(x) == -1)) END SELECT end subroutine @@ -119,8 +120,8 @@ subroutine CALL_ME7(arg) integer :: i,j integer, dimension(..), pointer :: arg integer, pointer :: arg2 - !ERROR: RANK (*) cannot be used when selector is POINTER or ALLOCATABLE select RANK(arg) + !ERROR: RANK (*) cannot be used when selector is POINTER or ALLOCATABLE RANK (*) print *, arg(1:1) RANK (1) @@ -146,13 +147,10 @@ subroutine CALL_ME8(x) print *, "Now it's rank 2 " RANK (*) print *, "Going for another rank" - !ERROR: 'kind=' argument must be a constant scalar integer whose value is a supported kind for the intrinsic result type j = INT(0, KIND=MERGE(KIND(0), -1, RANK(x) == 1)) !ERROR: Not more than one of the selectors of SELECT RANK statement may be '*' RANK (*) print *, "This is Wrong" - !ERROR: 'kind=' argument must be a constant scalar integer whose value is a supported kind for the intrinsic result type - j = INT(0, KIND=MERGE(KIND(0), -1, RANK(x) == 1)) END SELECT end subroutine diff --git a/flang/test/Semantics/select-rank03.f90 b/flang/test/Semantics/select-rank03.f90 index 038380435d00d..f49767c5adf33 100644 --- a/flang/test/Semantics/select-rank03.f90 +++ b/flang/test/Semantics/select-rank03.f90 @@ -6,7 +6,6 @@ program test contains subroutine allocatables(a) real, allocatable :: a(..) - !ERROR: RANK (*) cannot be used when selector is POINTER or ALLOCATABLE select rank(a) rank (0) allocate(a) ! ok @@ -44,13 +43,17 @@ subroutine allocatables(a) allocate(a, source=a1) !ERROR: Arrays in ALLOCATE must have a shape specification or an expression of the same rank must appear in SOURCE or MOLD allocate(a, mold=p1) + !ERROR: RANK (*) cannot be used when selector is POINTER or ALLOCATABLE rank (*) - !ERROR: Arrays in ALLOCATE must have a shape specification or an expression of the same rank must appear in SOURCE or MOLD + !ERROR: Whole assumed-size array 'a' may not appear here without subscripts + !ERROR: Entity in ALLOCATE statement must have the ALLOCATABLE or POINTER attribute allocate(a) + !ERROR: Whole assumed-size array 'a' may not appear here without subscripts deallocate(a) + !ERROR: Whole assumed-size array 'a' may not appear here without subscripts a = 1. rank default - !ERROR: Arrays in ALLOCATE must have a shape specification or an expression of the same rank must appear in SOURCE or MOLD + !ERROR: An assumed-rank object may not appear in an ALLOCATE statement allocate(a) deallocate(a) a = 1. @@ -58,7 +61,6 @@ subroutine allocatables(a) end subroutine pointers(p) real, pointer :: p(..) - !ERROR: RANK (*) cannot be used when selector is POINTER or ALLOCATABLE select rank(p) rank (0) allocate(p) ! 
ok @@ -98,12 +100,15 @@ subroutine pointers(p) p => t0 !ERROR: Pointer has rank 2 but target has rank 1 p => t1 + !ERROR: RANK (*) cannot be used when selector is POINTER or ALLOCATABLE rank (*) - !ERROR: Arrays in ALLOCATE must have a shape specification or an expression of the same rank must appear in SOURCE or MOLD + !ERROR: Whole assumed-size array 'p' may not appear here without subscripts + !ERROR: Entity in ALLOCATE statement must have the ALLOCATABLE or POINTER attribute allocate(p) + !ERROR: Whole assumed-size array 'p' may not appear here without subscripts deallocate(p) rank default - !ERROR: Arrays in ALLOCATE must have a shape specification or an expression of the same rank must appear in SOURCE or MOLD + !ERROR: An assumed-rank object may not appear in an ALLOCATE statement allocate(p) deallocate(p) !ERROR: pointer 'p' associated with object 't0' with incompatible type or shape From 6385c1df919f237d4149fabf542a158f61010bf8 Mon Sep 17 00:00:00 2001 From: Leonard Chan Date: Wed, 13 Sep 2023 23:11:50 +0000 Subject: [PATCH 17/22] [clang] Add experimental option to omit the RTTI component from the vtable when -fno-rtti is used For programs that don't use RTTI, the rtti component is just replaced with a zero. This way, vtables that don't use RTTI can still cooperate with vtables that use RTTI since offset calculations on the ABI level would still work. However, if throughout your whole program you don't use RTTI at all (such as the embedded case), then this is just an unused pointer-sized component that's wasting space. This adds an experimental option for removing the RTTI component from the vtable. Some notes: - This is only allowed when RTTI is disabled, so we don't have to worry about things like `typeid` or `dynamic_cast`. - This is a "use at your own risk" since, similar to relative vtables, everything must be compiled with this since it's an ABI breakage. That is, a program compiled with this is not guaranteed to work with a program compiled without this, even if RTTI is disabled for both programs. Note that this is a completely different ABI flavor orthogonal to the relative-vtables ABI. That is, they can be enabled/disabled independently. 
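As a rough illustration, consider a hypothetical translation unit built with -fno-rtti -fexperimental-omit-vtable-rtti (the CodeGen tests added below give the authoritative layouts):

  struct A {
    virtual void foo();
  };
  void A::foo() {}

  // Default Itanium layout:                 With the RTTI slot omitted:
  //   [0] offset-to-top                       [0] offset-to-top
  //   [1] RTTI slot (null under -fno-rtti)    [1] A::foo()  <-- address point
  //   [2] A::foo()  <-- address point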
Differential Revision: https://reviews.llvm.org/D152405 --- .../clang/Basic/DiagnosticDriverKinds.td | 3 ++ clang/include/clang/Basic/LangOptions.def | 3 ++ clang/include/clang/Driver/Options.td | 6 +++ clang/lib/AST/VTableBuilder.cpp | 9 +++- clang/lib/Driver/ToolChains/Clang.cpp | 6 +++ clang/lib/Frontend/CompilerInvocation.cpp | 8 +++ .../simple-vtable-definition.cpp | 32 ++++++++++++ .../OmitRTTIComponentABI/vbase-offset.cpp | 51 +++++++++++++++++++ .../OmitRTTIComponentABI/vtable-layout.cpp | 19 +++++++ .../test/Driver/omit-rtti-component-flag.cpp | 5 ++ .../omit-rtti-component-without-no-rtti.cpp | 13 +++++ 11 files changed, 153 insertions(+), 2 deletions(-) create mode 100644 clang/test/CodeGenCXX/OmitRTTIComponentABI/simple-vtable-definition.cpp create mode 100644 clang/test/CodeGenCXX/OmitRTTIComponentABI/vbase-offset.cpp create mode 100644 clang/test/CodeGenCXX/OmitRTTIComponentABI/vtable-layout.cpp create mode 100644 clang/test/Driver/omit-rtti-component-flag.cpp create mode 100644 clang/test/Driver/omit-rtti-component-without-no-rtti.cpp diff --git a/clang/include/clang/Basic/DiagnosticDriverKinds.td b/clang/include/clang/Basic/DiagnosticDriverKinds.td index 86567267cfb43..9349ff85ca8a1 100644 --- a/clang/include/clang/Basic/DiagnosticDriverKinds.td +++ b/clang/include/clang/Basic/DiagnosticDriverKinds.td @@ -674,6 +674,9 @@ def err_cc1_round_trip_mismatch : Error< def err_cc1_unbounded_vscale_min : Error< "minimum vscale must be an unsigned integer greater than 0">; +def err_drv_using_omit_rtti_component_without_no_rtti : Error< + "-fexperimental-omit-vtable-rtti call only be used with -fno-rtti">; + def err_drv_ssp_missing_offset_argument : Error< "'%0' is used without '-mstack-protector-guard-offset', and there is no default">; diff --git a/clang/include/clang/Basic/LangOptions.def b/clang/include/clang/Basic/LangOptions.def index 102209ce899d7..e18b5b80a34e7 100644 --- a/clang/include/clang/Basic/LangOptions.def +++ b/clang/include/clang/Basic/LangOptions.def @@ -450,6 +450,9 @@ LANGOPT(SpeculativeLoadHardening, 1, 0, "Speculative load hardening enabled") LANGOPT(RelativeCXXABIVTables, 1, 0, "Use an ABI-incompatible v-table layout that uses relative references") +LANGOPT(OmitVTableRTTI, 1, 0, + "Use an ABI-incompatible v-table layout that omits the RTTI component") + LANGOPT(VScaleMin, 32, 0, "Minimum vscale value") LANGOPT(VScaleMax, 32, 0, "Maximum vscale value") diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index 2d42d05859bc1..553c7928c4f94 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -2671,6 +2671,12 @@ def fno_experimental_relative_cxx_abi_vtables : Group, Visibility<[ClangOption, CC1Option]>, HelpText<"Do not use the experimental C++ class ABI for classes with virtual tables">; +defm experimental_omit_vtable_rtti : BoolFOption<"experimental-omit-vtable-rtti", + LangOpts<"OmitVTableRTTI">, DefaultFalse, + PosFlag, + NegFlag, + BothFlags<[], [CC1Option], " the RTTI component from virtual tables">>; + def fcxx_abi_EQ : Joined<["-"], "fc++-abi=">, Group, Visibility<[ClangOption, CC1Option]>, HelpText<"C++ ABI to use. This will override the target C++ ABI.">; diff --git a/clang/lib/AST/VTableBuilder.cpp b/clang/lib/AST/VTableBuilder.cpp index a587f9bdc7585..cce0a507e8077 100644 --- a/clang/lib/AST/VTableBuilder.cpp +++ b/clang/lib/AST/VTableBuilder.cpp @@ -665,7 +665,11 @@ CharUnits VCallAndVBaseOffsetBuilder::getCurrentOffsetOffset() const { // vtable address point. 
(We subtract 3 to account for the information just // above the address point, the RTTI info, the offset to top, and the // vcall offset itself). - int64_t OffsetIndex = -(int64_t)(3 + Components.size()); + size_t NumComponentsAboveAddrPoint = 3; + if (Context.getLangOpts().OmitVTableRTTI) + NumComponentsAboveAddrPoint--; + int64_t OffsetIndex = + -(int64_t)(NumComponentsAboveAddrPoint + Components.size()); // Under the relative ABI, the offset widths are 32-bit ints instead of // pointer widths. @@ -1669,7 +1673,8 @@ void ItaniumVTableBuilder::LayoutPrimaryAndSecondaryVTables( Components.push_back(VTableComponent::MakeOffsetToTop(OffsetToTop)); // Next, add the RTTI. - Components.push_back(VTableComponent::MakeRTTI(MostDerivedClass)); + if (!Context.getLangOpts().OmitVTableRTTI) + Components.push_back(VTableComponent::MakeRTTI(MostDerivedClass)); uint64_t AddressPoint = Components.size(); diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp index 531677e92f732..40e60585a8b8d 100644 --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -5526,6 +5526,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, Args.AddLastArg(CmdArgs, options::OPT_fexperimental_relative_cxx_abi_vtables, options::OPT_fno_experimental_relative_cxx_abi_vtables); + Args.AddLastArg(CmdArgs, options::OPT_fexperimental_omit_vtable_rtti, + options::OPT_fno_experimental_omit_vtable_rtti); + // Handle segmented stacks. Args.addOptInFlag(CmdArgs, options::OPT_fsplit_stack, options::OPT_fno_split_stack); @@ -6007,6 +6010,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, Args.AddLastArg(CmdArgs, options::OPT_fexperimental_relative_cxx_abi_vtables, options::OPT_fno_experimental_relative_cxx_abi_vtables); + Args.AddLastArg(CmdArgs, options::OPT_fexperimental_omit_vtable_rtti, + options::OPT_fno_experimental_omit_vtable_rtti); + if (Arg *A = Args.getLastArg(options::OPT_ffuchsia_api_level_EQ)) A->render(Args, CmdArgs); diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index 730db8e394f66..2dd299b5d1032 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -4109,6 +4109,14 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args, options::OPT_fno_experimental_relative_cxx_abi_vtables, TargetCXXABI::usesRelativeVTables(T)); + // RTTI is on by default. + bool HasRTTI = Args.hasFlag(options::OPT_frtti, options::OPT_fno_rtti, true); + Opts.OmitVTableRTTI = + Args.hasFlag(options::OPT_fexperimental_omit_vtable_rtti, + options::OPT_fno_experimental_omit_vtable_rtti, false); + if (Opts.OmitVTableRTTI && HasRTTI) + Diags.Report(diag::err_drv_using_omit_rtti_component_without_no_rtti); + for (const auto &A : Args.getAllArgValues(OPT_fmacro_prefix_map_EQ)) { auto Split = StringRef(A).split('='); Opts.MacroPrefixMap.insert( diff --git a/clang/test/CodeGenCXX/OmitRTTIComponentABI/simple-vtable-definition.cpp b/clang/test/CodeGenCXX/OmitRTTIComponentABI/simple-vtable-definition.cpp new file mode 100644 index 0000000000000..99395ba0e05ec --- /dev/null +++ b/clang/test/CodeGenCXX/OmitRTTIComponentABI/simple-vtable-definition.cpp @@ -0,0 +1,32 @@ +/// Check that -fexperimental-omit-vtable-rtti omits the RTTI component from +/// the vtable. 
+ +// RUN: %clang_cc1 %s -triple=aarch64-unknown-linux-gnu -fno-rtti -fexperimental-omit-vtable-rtti -S -o - -emit-llvm | FileCheck -check-prefixes=POINTER,RTTI %s +// RUN: %clang_cc1 %s -triple=aarch64-unknown-linux-gnu -fexperimental-relative-c++-abi-vtables -fno-rtti -fexperimental-omit-vtable-rtti -S -o - -emit-llvm | FileCheck -check-prefixes=RELATIVE,RTTI %s + +/// Normally, the vtable would contain at least three components: +/// - An offset to top +/// - A pointer to the RTTI struct +/// - A virtual function +/// +/// Now vtables should have just two components. +// POINTER: @_ZTV1A = unnamed_addr constant { [2 x ptr] } { [2 x ptr] [ptr null, ptr @_ZN1A3fooEv] }, align 8 +// RELATIVE: @_ZTV1A.local = private unnamed_addr constant { [2 x i32] } { [2 x i32] [i32 0, i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @_ZN1A3fooEv to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [2 x i32] }, ptr @_ZTV1A.local, i32 0, i32 0, i32 1) to i64)) to i32)] }, align 4 +// RELATIVE: @_ZTV1A = unnamed_addr alias { [2 x i32] }, ptr @_ZTV1A.local + +/// None of these supplementary symbols should be emitted with -fno-rtti, but +/// as a sanity check lets make sure they're not emitted also. +// RTTI-NOT: @_ZTVN10__cxxabiv117__class_type_infoE +// RTTI-NOT: @_ZTS1A +// RTTI-NOT: @_ZTI1A + +class A { +public: + virtual void foo(); +}; + +void A::foo() {} + +void A_foo(A *a) { + a->foo(); +} diff --git a/clang/test/CodeGenCXX/OmitRTTIComponentABI/vbase-offset.cpp b/clang/test/CodeGenCXX/OmitRTTIComponentABI/vbase-offset.cpp new file mode 100644 index 0000000000000..d490cc2dbebe5 --- /dev/null +++ b/clang/test/CodeGenCXX/OmitRTTIComponentABI/vbase-offset.cpp @@ -0,0 +1,51 @@ +/// Check that the offset to top calculation is adjusted to account for the +/// omitted RTTI entry. + +// RUN: %clang_cc1 %s -triple=aarch64-unknown-linux-gnu -fexperimental-omit-vtable-rtti -fno-rtti -S -o - -emit-llvm | FileCheck -check-prefixes=POINTER %s +// RUN: %clang_cc1 %s -triple=aarch64-unknown-linux-gnu -fexperimental-relative-c++-abi-vtables -fexperimental-omit-vtable-rtti -fno-rtti -S -o - -emit-llvm | FileCheck -check-prefixes=RELATIVE %s + +/// Some important things to check: +/// - The n16 here represents the virtual thunk size. Normally this would be 24 +/// to represent 3 components (offset to top, RTTI component, vcall offset), +/// but since one 8-byte component is removed, this is now 16. +// POINTER-LABEL: @_ZTv0_n16_N7Derived1fEi( +// POINTER-NEXT: entry: +// POINTER: [[vtable:%.+]] = load ptr, ptr %this1, align 8 + +/// Same here - When getting the vbase offset, we subtract 2 pointer sizes +/// instead of 3. +// POINTER-NEXT: [[vbase_offset_ptr:%.+]] = getelementptr inbounds i8, ptr [[vtable]], i64 -16 +// POINTER-NEXT: [[vbase_offset:%.+]] = load i64, ptr [[vbase_offset_ptr]], align 8 +// POINTER-NEXT: [[adj_this:%.+]] = getelementptr inbounds i8, ptr %this1, i64 [[vbase_offset]] +// POINTER: [[call:%.+]] = tail call noundef i32 @_ZN7Derived1fEi(ptr noundef{{[^,]*}} [[adj_this]], i32 noundef {{.*}}) +// POINTER: ret i32 [[call]] + +/// For relative vtables, it's almost the same except the offset sizes are +/// halved. 
+// RELATIVE-LABEL: @_ZTv0_n8_N7Derived1fEi( +// RELATIVE-NEXT: entry: +// RELATIVE: [[vtable:%.+]] = load ptr, ptr %this1, align 8 +// RELATIVE-NEXT: [[vbase_offset_ptr:%.+]] = getelementptr inbounds i8, ptr [[vtable]], i64 -8 +// RELATIVE-NEXT: [[vbase_offset:%.+]] = load i32, ptr [[vbase_offset_ptr]], align 4 +// RELATIVE-NEXT: [[adj_this:%.+]] = getelementptr inbounds i8, ptr %this1, i32 [[vbase_offset]] +// RELATIVE: [[call:%.+]] = tail call noundef i32 @_ZN7Derived1fEi(ptr noundef{{[^,]*}} [[adj_this]], i32 noundef {{.*}}) +// RELATIVE: ret i32 [[call]] + +class Base { +public: + virtual int f(int x); + +private: + long x; +}; + +class Derived : public virtual Base { +public: + virtual int f(int x); + +private: + long y; +}; + +int Base::f(int x) { return x + 1; } +int Derived::f(int x) { return x + 2; } diff --git a/clang/test/CodeGenCXX/OmitRTTIComponentABI/vtable-layout.cpp b/clang/test/CodeGenCXX/OmitRTTIComponentABI/vtable-layout.cpp new file mode 100644 index 0000000000000..bcc9264f5e5b8 --- /dev/null +++ b/clang/test/CodeGenCXX/OmitRTTIComponentABI/vtable-layout.cpp @@ -0,0 +1,19 @@ +/// Ensure -fdump-vtable-layout omits the rtti component when passed -fexperimental-omit-vtable-rtti. + +// RUN: %clang_cc1 %s -triple=aarch64-unknown-linux-gnu -fno-rtti -fexperimental-omit-vtable-rtti -emit-llvm-only -fdump-vtable-layouts | FileCheck %s + +// CHECK: Vtable for 'A' (2 entries). +// CHECK-NEXT: 0 | offset_to_top (0) +// CHECK-NEXT: -- (A, 0) vtable address -- +// CHECK-NEXT: 1 | void A::foo() + +class A { +public: + virtual void foo(); +}; + +void A::foo() {} + +void A_foo(A *a) { + a->foo(); +} diff --git a/clang/test/Driver/omit-rtti-component-flag.cpp b/clang/test/Driver/omit-rtti-component-flag.cpp new file mode 100644 index 0000000000000..54b88a8775ef3 --- /dev/null +++ b/clang/test/Driver/omit-rtti-component-flag.cpp @@ -0,0 +1,5 @@ +// RUN: %clangxx --target=aarch64-unknown-linux -fno-rtti -Xclang -fexperimental-omit-vtable-rtti -c %s -### 2>&1 | FileCheck %s --check-prefix=OMIT +// RUN: %clangxx --target=aarch64-unknown-linux -fno-rtti -Xclang -fno-experimental-omit-vtable-rtti -c %s -### 2>&1 | FileCheck %s --check-prefix=NO-OMIT + +// OMIT: "-fexperimental-omit-vtable-rtti" +// NO-OMIT-NOT: "-fexperimental-omit-vtable-rtti" diff --git a/clang/test/Driver/omit-rtti-component-without-no-rtti.cpp b/clang/test/Driver/omit-rtti-component-without-no-rtti.cpp new file mode 100644 index 0000000000000..031094f91b3af --- /dev/null +++ b/clang/test/Driver/omit-rtti-component-without-no-rtti.cpp @@ -0,0 +1,13 @@ +/// Ensure that -fexperimental-omit-vtable-rtti is only allowed if rtti is +/// disabled. 
+ +// RUN: not %clang -c -Xclang -fexperimental-omit-vtable-rtti %s 2>&1 | FileCheck -check-prefix=ERROR %s +// RUN: not %clang -c -Xclang -fexperimental-omit-vtable-rtti -frtti %s 2>&1 | FileCheck -check-prefix=ERROR %s +// RUN: not %clang -c -Xclang -fexperimental-omit-vtable-rtti -fno-rtti -frtti %s 2>&1 | FileCheck -check-prefix=ERROR %s + +// RUN: %clang -c -Xclang -fexperimental-omit-vtable-rtti -fno-rtti %s 2>&1 | FileCheck -check-prefix=NO-ERROR %s --allow-empty +// RUN: %clang -c -Xclang -fno-experimental-omit-vtable-rtti -frtti %s 2>&1 | FileCheck -check-prefix=NO-ERROR %s --allow-empty +// RUN: %clang -c -Xclang -fexperimental-omit-vtable-rtti -Xclang -fno-experimental-omit-vtable-rtti -frtti %s 2>&1 | FileCheck -check-prefix=NO-ERROR %s --allow-empty + +// ERROR: -fexperimental-omit-vtable-rtti call only be used with -fno-rtti +// NO-ERROR-NOT: -fexperimental-omit-vtable-rtti call only be used with -fno-rtti From c8c075e8769a615451653b37e5426eb5d7a1d16c Mon Sep 17 00:00:00 2001 From: Reid Kleckner Date: Wed, 13 Sep 2023 16:29:11 -0700 Subject: [PATCH 18/22] [MS] Follow up fix to pass aligned args to variadic x86_32 functions (#65692) MSVC allows users to pass structures with required alignments greater than 4 to variadic functions. It does not pass them indirectly to correctly align them. Instead, it passes them directly with the usual 4 byte stack alignment. This change implements the same logic in clang on the passing side. The receiving side (va_arg) never implemented any of this indirect logic, so it doesn't need to be updated. This issue pre-existed, but @aaron.ballman noticed it when we started passing structs containing aligned fields indirectly in D152752. --- clang/include/clang/CodeGen/CGFunctionInfo.h | 5 ++++ clang/lib/CodeGen/Targets/X86.cpp | 26 ++++++++++------- .../test/CodeGen/X86/x86_32-arguments-win32.c | 29 +++++++++++++++++++ 3 files changed, 49 insertions(+), 11 deletions(-) diff --git a/clang/include/clang/CodeGen/CGFunctionInfo.h b/clang/include/clang/CodeGen/CGFunctionInfo.h index b8971d5793f36..e388901b8a504 100644 --- a/clang/include/clang/CodeGen/CGFunctionInfo.h +++ b/clang/include/clang/CodeGen/CGFunctionInfo.h @@ -527,6 +527,11 @@ class RequiredArgs { return NumRequired; } + /// Return true if the argument at a given index is required. + bool isRequiredArg(unsigned argIdx) const { + return argIdx == ~0U || argIdx < NumRequired; + } + unsigned getOpaqueData() const { return NumRequired; } static RequiredArgs getFromOpaqueData(unsigned value) { if (value == ~0U) return All; diff --git a/clang/lib/CodeGen/Targets/X86.cpp b/clang/lib/CodeGen/Targets/X86.cpp index 2ea82397f1190..b2e2f6789cce3 100644 --- a/clang/lib/CodeGen/Targets/X86.cpp +++ b/clang/lib/CodeGen/Targets/X86.cpp @@ -87,12 +87,15 @@ static ABIArgInfo getDirectX86Hva(llvm::Type* T = nullptr) { /// Similar to llvm::CCState, but for Clang. struct CCState { CCState(CGFunctionInfo &FI) - : IsPreassigned(FI.arg_size()), CC(FI.getCallingConvention()) {} + : IsPreassigned(FI.arg_size()), CC(FI.getCallingConvention()), + Required(FI.getRequiredArgs()), IsDelegateCall(FI.isDelegateCall()) {} llvm::SmallBitVector IsPreassigned; unsigned CC = CallingConv::CC_C; unsigned FreeRegs = 0; unsigned FreeSSERegs = 0; + RequiredArgs Required; + bool IsDelegateCall = false; }; /// X86_32ABIInfo - The X86-32 ABI information. 
@@ -141,7 +144,7 @@ class X86_32ABIInfo : public ABIInfo { Class classify(QualType Ty) const; ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const; ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State, - bool isDelegateCall) const; + unsigned ArgIndex) const; /// Updates the number of available free registers, returns /// true if any registers were allocated. @@ -739,7 +742,7 @@ void X86_32ABIInfo::runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) c } ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty, CCState &State, - bool isDelegateCall) const { + unsigned ArgIndex) const { // FIXME: Set alignment on indirect arguments. bool IsFastCall = State.CC == llvm::CallingConv::X86_FastCall; bool IsRegCall = State.CC == llvm::CallingConv::X86_RegCall; @@ -754,7 +757,7 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty, CCState &State, CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()); if (RAA == CGCXXABI::RAA_Indirect) { return getIndirectResult(Ty, false, State); - } else if (isDelegateCall) { + } else if (State.IsDelegateCall) { // Avoid having different alignments on delegate call args by always // setting the alignment to 4, which is what we do for inallocas. ABIArgInfo Res = getIndirectResult(Ty, false, State); @@ -812,11 +815,12 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty, CCState &State, } llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr; - // Pass over-aligned aggregates on Windows indirectly. This behavior was - // added in MSVC 2015. Use the required alignment from the record layout, - // since that may be less than the regular type alignment, and types with - // required alignment of less than 4 bytes are not passed indirectly. - if (IsWin32StructABI) { + // Pass over-aligned aggregates to non-variadic functions on Windows + // indirectly. This behavior was added in MSVC 2015. Use the required + // alignment from the record layout, since that may be less than the + // regular type alignment, and types with required alignment of less than 4 + // bytes are not passed indirectly. + if (IsWin32StructABI && State.Required.isRequiredArg(ArgIndex)) { unsigned AlignInBits = 0; if (RT) { const ASTRecordLayout &Layout = @@ -942,13 +946,13 @@ void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const { bool UsedInAlloca = false; MutableArrayRef Args = FI.arguments(); - for (int I = 0, E = Args.size(); I < E; ++I) { + for (unsigned I = 0, E = Args.size(); I < E; ++I) { // Skip arguments that have already been assigned. 
if (State.IsPreassigned.test(I)) continue; Args[I].info = - classifyArgumentType(Args[I].type, State, FI.isDelegateCall()); + classifyArgumentType(Args[I].type, State, I); UsedInAlloca |= (Args[I].info.getKind() == ABIArgInfo::InAlloca); } diff --git a/clang/test/CodeGen/X86/x86_32-arguments-win32.c b/clang/test/CodeGen/X86/x86_32-arguments-win32.c index 53f6b5b79642c..5b81c43f4bbb8 100644 --- a/clang/test/CodeGen/X86/x86_32-arguments-win32.c +++ b/clang/test/CodeGen/X86/x86_32-arguments-win32.c @@ -128,3 +128,32 @@ void pass_underaligned_record_field() { // CHECK: call void @receive_falign1(i64 {{[^,)]*}}) // CHECK: call void @receive_falign4(i64 {{[^,)]*}}) // CHECK: call void @receive_falign8(ptr {{[^,)]*}}) + +struct __declspec(align(8)) BigAligned { + int big[5]; +}; + +void receive_aligned_variadic(int f, ...); +void pass_aligned_variadic() { + struct Align8 a8 = {42}; + struct FieldAlign8 f8 = {42}; + struct BigAligned big; + receive_aligned_variadic(1, a8, f8, big); +} +// MSVC doesn't pass aligned objects to variadic functions indirectly. +// CHECK-LABEL: define dso_local void @pass_aligned_variadic() +// CHECK: call void (i32, ...) @receive_aligned_variadic(i32 noundef 1, i64 %{{[^,]*}}, i64 %{{[^,]*}}, ptr noundef byval(%struct.BigAligned) align 4 %{{[^)]*}}) + + +void receive_fixed_align_variadic(struct BigAligned big, ...); +void pass_fixed_align_variadic() { + struct BigAligned big; + receive_fixed_align_variadic(big, 42); +} +// MSVC emits error C2719 and C3916 when receiving and passing arguments with +// required alignment greater than 4 to the fixed part of a variadic function +// prototype, but it's actually easier to just implement this functionality +// correctly in Clang than it is to be bug for bug compatible, so we pass such +// arguments indirectly. +// CHECK-LABEL: define dso_local void @pass_fixed_align_variadic() +// CHECK: call void (ptr, ...) @receive_fixed_align_variadic(ptr noundef %{{[^)]*}}, i32 noundef 42) From daa5da063ae8b39efa7368475f33db3313b41e30 Mon Sep 17 00:00:00 2001 From: Peter Klausler <35819229+klausler@users.noreply.github.com> Date: Wed, 13 Sep 2023 16:34:23 -0700 Subject: [PATCH 19/22] [flang] Don't blow up when combining mixed COMPLEX operations (#66235) Expression processing applies some straightforward rewriting of mixed complex/real and complex/integer operations to avoid having to promote the real/integer operand to complex and then perform a complex operation; for example, (a,b)+x becomes (a+x,b) rather than (a,b)+(x,0). But this can blow up the expression representation when the complex operand cannot be duplicated cheaply. So apply this technique only to complex operands that are appropriate to duplicate. Fixes https://github.com/llvm/llvm-project/issues/65142. 
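An illustrative sketch of the failure mode (not the regression test added below; the names are invented): each mixed COMPLEX+REAL operation used to split its COMPLEX operand into one copy for the real part and another for the imaginary part, so nesting such operations roughly doubled the expression tree at every level:

  complex :: z
  real :: x
  z = ((((z + x) + x) + x) + x)   ! every inner mixed addition was duplicated
                                  ! to build (re + x, im) at the next level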
--- flang/include/flang/Evaluate/tools.h | 10 - flang/lib/Evaluate/tools.cpp | 263 ++++++++++++++++++--------- flang/test/Evaluate/bug65142.f90 | 14 ++ 3 files changed, 189 insertions(+), 98 deletions(-) create mode 100644 flang/test/Evaluate/bug65142.f90 diff --git a/flang/include/flang/Evaluate/tools.h b/flang/include/flang/Evaluate/tools.h index 71fe1237efdde..69730286767ce 100644 --- a/flang/include/flang/Evaluate/tools.h +++ b/flang/include/flang/Evaluate/tools.h @@ -149,16 +149,6 @@ common::IfNoLvalue::category>>, A> AsCategoryExpr( Expr Parenthesize(Expr &&); -Expr GetComplexPart( - const Expr &, bool isImaginary = false); -Expr GetComplexPart(Expr &&, bool isImaginary = false); - -template -Expr MakeComplex(Expr> &&re, - Expr> &&im) { - return AsCategoryExpr(ComplexConstructor{std::move(re), std::move(im)}); -} - template constexpr bool IsNumericCategoryExpr() { if constexpr (common::HasMember) { return false; diff --git a/flang/lib/Evaluate/tools.cpp b/flang/lib/Evaluate/tools.cpp index aadbc0804b342..a4afc3db06022 100644 --- a/flang/lib/Evaluate/tools.cpp +++ b/flang/lib/Evaluate/tools.cpp @@ -180,8 +180,9 @@ std::optional> Package( std::optional>> &&catExpr) { if (catExpr) { return {AsGenericExpr(std::move(*catExpr))}; + } else { + return std::nullopt; } - return NoExpr(); } // Mixed REAL+INTEGER operations. REAL**INTEGER is a special case that @@ -204,6 +205,12 @@ std::optional> MixedRealLeft( std::move(rx.u))); } +template +Expr MakeComplex(Expr> &&re, + Expr> &&im) { + return AsCategoryExpr(ComplexConstructor{std::move(re), std::move(im)}); +} + std::optional> ConstructComplex( parser::ContextualMessages &messages, Expr &&real, Expr &&imaginary, int defaultRealKind) { @@ -228,24 +235,87 @@ std::optional> ConstructComplex( return std::nullopt; } -Expr GetComplexPart(const Expr &z, bool isImaginary) { - return common::visit( - [&](const auto &zk) { - static constexpr int kind{ResultType::kind}; - return AsCategoryExpr(ComplexComponent{isImaginary, zk}); - }, - z.u); -} +// Extracts the real or imaginary part of the result of a COMPLEX +// expression, when that expression is simple enough to be duplicated. +template struct ComplexPartExtractor { + template static std::optional> Get(const A &) { + return std::nullopt; + } -Expr GetComplexPart(Expr &&z, bool isImaginary) { - return common::visit( - [&](auto &&zk) { - static constexpr int kind{ResultType::kind}; - return AsCategoryExpr( - ComplexComponent{isImaginary, std::move(zk)}); - }, - z.u); -} + template + static std::optional> Get( + const Parentheses> &kz) { + if (auto x{Get(kz.left())}) { + return AsGenericExpr(AsSpecificExpr( + Parentheses>{std::move(*x)})); + } else { + return std::nullopt; + } + } + + template + static std::optional> Get( + const Negate> &kz) { + if (auto x{Get(kz.left())}) { + return AsGenericExpr(AsSpecificExpr( + Negate>{std::move(*x)})); + } else { + return std::nullopt; + } + } + + template + static std::optional> Get( + const Convert, TypeCategory::Complex> + &kz) { + if (auto x{Get(kz.left())}) { + return AsGenericExpr(AsSpecificExpr( + Convert, TypeCategory::Real>{ + AsGenericExpr(std::move(*x))})); + } else { + return std::nullopt; + } + } + + template + static std::optional> Get(const ComplexConstructor &kz) { + return GET_IMAGINARY ? Get(kz.right()) : Get(kz.left()); + } + + template + static std::optional> Get( + const Constant> &kz) { + if (auto cz{kz.GetScalarValue()}) { + return AsGenericExpr( + AsSpecificExpr(GET_IMAGINARY ? 
cz->AIMAG() : cz->REAL())); + } else { + return std::nullopt; + } + } + + template + static std::optional> Get( + const Designator> &kz) { + if (const auto *symbolRef{std::get_if(&kz.u)}) { + return AsGenericExpr(AsSpecificExpr( + Designator>{ComplexPart{ + DataRef{*symbolRef}, + GET_IMAGINARY ? ComplexPart::Part::IM : ComplexPart::Part::RE}})); + } else { + return std::nullopt; + } + } + + template + static std::optional> Get( + const Expr> &kz) { + return Get(kz.u); + } + + static std::optional> Get(const Expr &z) { + return Get(z.u); + } +}; // Convert REAL to COMPLEX of the same kind. Preserving the real operand kind // and then applying complex operand promotion rules allows the result to have @@ -266,19 +336,31 @@ Expr PromoteRealToComplex(Expr &&someX) { // corresponding COMPLEX+COMPLEX operation. template