diff --git a/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp b/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
index e89ec353487eef..3aa8ea3f514713 100644
--- a/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -653,10 +653,6 @@ static bool findArgParts(Argument *Arg, const DataLayout &DL, AAResults &AAR,
   // check to see if the pointer is guaranteed to not be modified from entry of
   // the function to each of the load instructions.
 
-  // Because there could be several/many load instructions, remember which
-  // blocks we know to be transparent to the load.
-  df_iterator_default_set<BasicBlock *, 16> TranspBlocks;
-
   for (LoadInst *Load : Loads) {
     // Check to see if the load is invalidated from the start of the block to
     // the load itself.
@@ -670,7 +666,7 @@ static bool findArgParts(Argument *Arg, const DataLayout &DL, AAResults &AAR,
     // To do this, we perform a depth first search on the inverse CFG from the
     // loading block.
     for (BasicBlock *P : predecessors(BB)) {
-      for (BasicBlock *TranspBB : inverse_depth_first_ext(P, TranspBlocks))
+      for (BasicBlock *TranspBB : inverse_depth_first(P))
         if (AAR.canBasicBlockModify(*TranspBB, Loc))
           return false;
     }
diff --git a/llvm/test/Transforms/ArgumentPromotion/aliasing-and-non-aliasing-loads-with-clobber.ll b/llvm/test/Transforms/ArgumentPromotion/aliasing-and-non-aliasing-loads-with-clobber.ll
index 69385a7ea51a74..1e1669b29b0db6 100644
--- a/llvm/test/Transforms/ArgumentPromotion/aliasing-and-non-aliasing-loads-with-clobber.ll
+++ b/llvm/test/Transforms/ArgumentPromotion/aliasing-and-non-aliasing-loads-with-clobber.ll
@@ -7,17 +7,14 @@ target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:
 
 ; Test case for https://github.com/llvm/llvm-project/issues/84807.
 
-; FIXME: Currently the loads from @callee are moved to @caller, even though
-; the store in %then may aliases to load from %q.
+; Make sure the loads from @callee are not moved to @caller, as the store
+; in %then may alias the load from %q.
 
 define i32 @caller1(i1 %c) {
 ; CHECK-LABEL: define i32 @caller1(
 ; CHECK-SAME: i1 [[C:%.*]]) {
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[F_VAL:%.*]] = load i16, ptr @f, align 8
-; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr i8, ptr @f, i64 8
-; CHECK-NEXT:    [[F_VAL1:%.*]] = load i64, ptr [[TMP0]], align 8
-; CHECK-NEXT:    call void @callee1(i16 [[F_VAL]], i64 [[F_VAL1]], i1 [[C]])
+; CHECK-NEXT:    call void @callee1(ptr noundef nonnull @f, i1 [[C]])
 ; CHECK-NEXT:    ret i32 0
 ;
 entry:
@@ -27,13 +24,16 @@ entry:
 
 define internal void @callee1(ptr nocapture noundef readonly %q, i1 %c) {
 ; CHECK-LABEL: define internal void @callee1(
-; CHECK-SAME: i16 [[Q_0_VAL:%.*]], i64 [[Q_8_VAL:%.*]], i1 [[C:%.*]]) {
+; CHECK-SAME: ptr nocapture noundef readonly [[Q:%.*]], i1 [[C:%.*]]) {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 [[C]], label [[THEN:%.*]], label [[EXIT:%.*]]
 ; CHECK:       then:
 ; CHECK-NEXT:    store i16 123, ptr @f, align 8
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
+; CHECK-NEXT:    [[Q_0_VAL:%.*]] = load i16, ptr [[Q]], align 8
+; CHECK-NEXT:    [[GEP_8:%.*]] = getelementptr inbounds i8, ptr [[Q]], i64 8
+; CHECK-NEXT:    [[Q_8_VAL:%.*]] = load i64, ptr [[GEP_8]], align 8
 ; CHECK-NEXT:    call void @use(i16 [[Q_0_VAL]], i64 [[Q_8_VAL]])
 ; CHECK-NEXT:    ret void
 ;
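
For context on why the shared set was unsound: the removed TranspBlocks set was reused across all loads, so once the inverse-CFG walk had visited a block while checking one load's MemoryLocation, the walk for a later load (with a different location) skipped that block and never asked whether it clobbers the new location. The standalone C++ sketch below is not LLVM code; InverseCFG, blockModifies and unmodifiedOnAllPaths are hypothetical stand-ins for the CFG, AAResults::canBasicBlockModify and the inverse_depth_first walk in findArgParts, used only to show the shape of the bug.

// A minimal sketch (not LLVM code) of the bug fixed above. Names such as
// InverseCFG, blockModifies and unmodifiedOnAllPaths are hypothetical.
#include <cstdio>
#include <set>
#include <vector>

// Inverse CFG: Preds[B] lists the predecessors of block B.
using InverseCFG = std::vector<std::vector<int>>;

// Stand-in for AAResults::canBasicBlockModify: does block B clobber Loc?
static bool blockModifies(int B, int Loc) {
  return B == 1 && Loc == 0; // in this toy CFG, block 1 clobbers location 0
}

// Depth-first walk of everything reachable from `From` on the inverse CFG,
// reporting whether `Loc` is unmodified on every path. `Visited` is the DFS
// set; the bug appears when the caller reuses it across different `Loc`s.
static bool unmodifiedOnAllPaths(const InverseCFG &Preds, int From, int Loc,
                                 std::set<int> &Visited) {
  if (!Visited.insert(From).second)
    return true; // already visited -- but possibly for a *different* Loc
  if (blockModifies(From, Loc))
    return false;
  for (int P : Preds[From])
    if (!unmodifiedOnAllPaths(Preds, P, Loc, Visited))
      return false;
  return true;
}

int main() {
  // Blocks: 0 = entry, 1 = then (clobbers location 0), 2 = exit.
  InverseCFG Preds = {{}, {0}, {0, 1}};
  // Two loads in block 2: the first does not alias the store, the second does.
  std::vector<int> LoadLocs = {1, 0};

  // Old behaviour: one visited set shared across all loads (TranspBlocks).
  std::set<int> Shared;
  bool SharedSafe = true;
  for (int Loc : LoadLocs)
    for (int P : Preds[2])
      SharedSafe &= unmodifiedOnAllPaths(Preds, P, Loc, Shared);

  // New behaviour: a fresh traversal per query, like inverse_depth_first(P).
  bool FreshSafe = true;
  for (int Loc : LoadLocs)
    for (int P : Preds[2]) {
      std::set<int> Fresh;
      FreshSafe &= unmodifiedOnAllPaths(Preds, P, Loc, Fresh);
    }

  // Prints "shared: safe  fresh: unsafe": the shared set never revisits the
  // clobbering block for the second load, so promotion looks (wrongly) safe.
  std::printf("shared: %s  fresh: %s\n", SharedSafe ? "safe" : "unsafe",
              FreshSafe ? "safe" : "unsafe");
  return 0;
}

With a per-query set, the clobber in the "then" block is rediscovered for the second load, which is the effect of switching from inverse_depth_first_ext(P, TranspBlocks) to inverse_depth_first(P) above.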