Merge pull request llvm#6 from Disasm/rustc-pick-min
Apply patches for RISC-V 64-bit support
alexcrichton committed Feb 12, 2019
2 parents 683d352 + 70b9bc4 commit 73a75d3
Showing 35 changed files with 7,693 additions and 269 deletions.
25 changes: 25 additions & 0 deletions llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -41,4 +41,29 @@ def int_riscv_masked_cmpxchg_i32
llvm_i32_ty, llvm_i32_ty],
[IntrArgMemOnly, NoCapture<0>]>;

+class MaskedAtomicRMW64Intrinsic
+    : Intrinsic<[llvm_i64_ty],
+                [llvm_anyptr_ty, llvm_i64_ty, llvm_i64_ty, llvm_i64_ty],
+                [IntrArgMemOnly, NoCapture<0>]>;
+
+class MaskedAtomicRMW64WithSextIntrinsic
+    : Intrinsic<[llvm_i64_ty],
+                [llvm_anyptr_ty, llvm_i64_ty, llvm_i64_ty, llvm_i64_ty,
+                 llvm_i64_ty],
+                [IntrArgMemOnly, NoCapture<0>]>;
+
+def int_riscv_masked_atomicrmw_xchg_i64 : MaskedAtomicRMW64Intrinsic;
+def int_riscv_masked_atomicrmw_add_i64 : MaskedAtomicRMW64Intrinsic;
+def int_riscv_masked_atomicrmw_sub_i64 : MaskedAtomicRMW64Intrinsic;
+def int_riscv_masked_atomicrmw_nand_i64 : MaskedAtomicRMW64Intrinsic;
+def int_riscv_masked_atomicrmw_max_i64 : MaskedAtomicRMW64WithSextIntrinsic;
+def int_riscv_masked_atomicrmw_min_i64 : MaskedAtomicRMW64WithSextIntrinsic;
+def int_riscv_masked_atomicrmw_umax_i64 : MaskedAtomicRMW64Intrinsic;
+def int_riscv_masked_atomicrmw_umin_i64 : MaskedAtomicRMW64Intrinsic;
+
+def int_riscv_masked_cmpxchg_i64
+    : Intrinsic<[llvm_i64_ty], [llvm_anyptr_ty, llvm_i64_ty, llvm_i64_ty,
+                                llvm_i64_ty, llvm_i64_ty],
+                [IntrArgMemOnly, NoCapture<0>]>;
+
} // TargetPrefix = "riscv"
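
These i64 definitions mirror the masked i32 intrinsics above. As a hedged illustration only (not code from this commit), a caller such as the target lowering could instantiate one of them with IRBuilder; the helper name buildMaskedAtomicAddI64 and its parameters are assumptions made for this sketch:

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"

using namespace llvm;

// Builds a call to llvm.riscv.masked.atomicrmw.add.i64. The intrinsic is
// overloaded on its pointer operand (llvm_anyptr_ty), so the declaration is
// looked up with the concrete pointer type of Addr.
static Value *buildMaskedAtomicAddI64(Module &M, IRBuilder<> &Builder,
                                      Value *Addr, Value *Incr, Value *Mask,
                                      Value *Ordering) {
  Function *Fn = Intrinsic::getDeclaration(
      &M, Intrinsic::riscv_masked_atomicrmw_add_i64, {Addr->getType()});
  // Operand order follows the .td signature: ptr, incr, mask, ordering.
  return Builder.CreateCall(Fn, {Addr, Incr, Mask, Ordering});
}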
81 changes: 67 additions & 14 deletions llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
@@ -87,6 +87,9 @@ bool RISCVExpandPseudo::expandMI(MachineBasicBlock &MBB,
case RISCV::PseudoAtomicLoadNand32:
return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, false, 32,
NextMBBI);
+  case RISCV::PseudoAtomicLoadNand64:
+    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, false, 64,
+                             NextMBBI);
case RISCV::PseudoMaskedAtomicSwap32:
return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Xchg, true, 32,
NextMBBI);
@@ -111,6 +114,8 @@ bool RISCVExpandPseudo::expandMI(MachineBasicBlock &MBB,
NextMBBI);
case RISCV::PseudoCmpXchg32:
return expandAtomicCmpXchg(MBB, MBBI, false, 32, NextMBBI);
+  case RISCV::PseudoCmpXchg64:
+    return expandAtomicCmpXchg(MBB, MBBI, false, 64, NextMBBI);
case RISCV::PseudoMaskedCmpXchg32:
return expandAtomicCmpXchg(MBB, MBBI, true, 32, NextMBBI);
}
@@ -152,12 +157,61 @@ static unsigned getSCForRMW32(AtomicOrdering Ordering) {
}
}

+static unsigned getLRForRMW64(AtomicOrdering Ordering) {
+  switch (Ordering) {
+  default:
+    llvm_unreachable("Unexpected AtomicOrdering");
+  case AtomicOrdering::Monotonic:
+    return RISCV::LR_D;
+  case AtomicOrdering::Acquire:
+    return RISCV::LR_D_AQ;
+  case AtomicOrdering::Release:
+    return RISCV::LR_D;
+  case AtomicOrdering::AcquireRelease:
+    return RISCV::LR_D_AQ;
+  case AtomicOrdering::SequentiallyConsistent:
+    return RISCV::LR_D_AQ_RL;
+  }
+}
+
+static unsigned getSCForRMW64(AtomicOrdering Ordering) {
+  switch (Ordering) {
+  default:
+    llvm_unreachable("Unexpected AtomicOrdering");
+  case AtomicOrdering::Monotonic:
+    return RISCV::SC_D;
+  case AtomicOrdering::Acquire:
+    return RISCV::SC_D;
+  case AtomicOrdering::Release:
+    return RISCV::SC_D_RL;
+  case AtomicOrdering::AcquireRelease:
+    return RISCV::SC_D_RL;
+  case AtomicOrdering::SequentiallyConsistent:
+    return RISCV::SC_D_AQ_RL;
+  }
+}
+
+static unsigned getLRForRMW(AtomicOrdering Ordering, int Width) {
+  if (Width == 32)
+    return getLRForRMW32(Ordering);
+  if (Width == 64)
+    return getLRForRMW64(Ordering);
+  llvm_unreachable("Unexpected LR width\n");
+}
+
+static unsigned getSCForRMW(AtomicOrdering Ordering, int Width) {
+  if (Width == 32)
+    return getSCForRMW32(Ordering);
+  if (Width == 64)
+    return getSCForRMW64(Ordering);
+  llvm_unreachable("Unexpected SC width\n");
+}

static void doAtomicBinOpExpansion(const RISCVInstrInfo *TII, MachineInstr &MI,
DebugLoc DL, MachineBasicBlock *ThisMBB,
MachineBasicBlock *LoopMBB,
MachineBasicBlock *DoneMBB,
AtomicRMWInst::BinOp BinOp, int Width) {
-  assert(Width == 32 && "RV64 atomic expansion currently unsupported");
unsigned DestReg = MI.getOperand(0).getReg();
unsigned ScratchReg = MI.getOperand(1).getReg();
unsigned AddrReg = MI.getOperand(2).getReg();
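
The getLRForRMW/getSCForRMW wrappers introduced above let one expansion routine serve both widths. As an illustration only, inside one of this file's expansion routines (the opcode values follow from the switches above; the snippet itself is not part of the commit):

// For a sequentially consistent 64-bit read-modify-write:
unsigned LrOpc = getLRForRMW(AtomicOrdering::SequentiallyConsistent, 64);
unsigned ScOpc = getSCForRMW(AtomicOrdering::SequentiallyConsistent, 64);
// LrOpc == RISCV::LR_D_AQ_RL and ScOpc == RISCV::SC_D_AQ_RL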
@@ -166,11 +220,11 @@ static void doAtomicBinOpExpansion(const RISCVInstrInfo *TII, MachineInstr &MI,
static_cast<AtomicOrdering>(MI.getOperand(4).getImm());

// .loop:
-  // lr.w dest, (addr)
+  // lr.[w|d] dest, (addr)
// binop scratch, dest, val
-  // sc.w scratch, scratch, (addr)
+  // sc.[w|d] scratch, scratch, (addr)
// bnez scratch, loop
-  BuildMI(LoopMBB, DL, TII->get(getLRForRMW32(Ordering)), DestReg)
+  BuildMI(LoopMBB, DL, TII->get(getLRForRMW(Ordering, Width)), DestReg)
.addReg(AddrReg);
switch (BinOp) {
default:
@@ -184,7 +238,7 @@ static void doAtomicBinOpExpansion(const RISCVInstrInfo *TII, MachineInstr &MI,
.addImm(-1);
break;
}
-  BuildMI(LoopMBB, DL, TII->get(getSCForRMW32(Ordering)), ScratchReg)
+  BuildMI(LoopMBB, DL, TII->get(getSCForRMW(Ordering, Width)), ScratchReg)
.addReg(AddrReg)
.addReg(ScratchReg);
BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
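
As a semantics-only sketch of what the loop above computes for a 64-bit atomic nand (an assumption-laden model: std::atomic stands in for the lr.d/sc.d reservation, and the pass emits machine instructions, not this C++):

#include <atomic>
#include <cstdint>

// Models the .loop block: lr.d, and + xori -1 (nand), sc.d, bnez retry.
uint64_t atomicNandI64(std::atomic<uint64_t> &Addr, uint64_t Val) {
  uint64_t Dest = Addr.load();  // lr.[w|d] dest, (addr)
  // compare_exchange_weak may fail spuriously, like sc.[w|d] losing its
  // reservation; on failure it reloads Dest and the loop retries (bnez).
  while (!Addr.compare_exchange_weak(Dest, ~(Dest & Val))) {
  }
  return Dest;  // the old value, which the expansion leaves in DestReg
}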
@@ -219,7 +273,7 @@ static void doMaskedAtomicBinOpExpansion(
const RISCVInstrInfo *TII, MachineInstr &MI, DebugLoc DL,
MachineBasicBlock *ThisMBB, MachineBasicBlock *LoopMBB,
MachineBasicBlock *DoneMBB, AtomicRMWInst::BinOp BinOp, int Width) {
-  assert(Width == 32 && "RV64 atomic expansion currently unsupported");
+  assert(Width == 32 && "Should never need to expand masked 64-bit operations");
unsigned DestReg = MI.getOperand(0).getReg();
unsigned ScratchReg = MI.getOperand(1).getReg();
unsigned AddrReg = MI.getOperand(2).getReg();
@@ -333,7 +387,7 @@ bool RISCVExpandPseudo::expandAtomicMinMaxOp(
MachineBasicBlock::iterator &NextMBBI) {
assert(IsMasked == true &&
"Should only need to expand masked atomic max/min");
-  assert(Width == 32 && "RV64 atomic expansion currently unsupported");
+  assert(Width == 32 && "Should never need to expand masked 64-bit operations");

MachineInstr &MI = *MBBI;
DebugLoc DL = MI.getDebugLoc();
@@ -451,7 +505,6 @@ bool RISCVExpandPseudo::expandAtomicMinMaxOp(
bool RISCVExpandPseudo::expandAtomicCmpXchg(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, bool IsMasked,
int Width, MachineBasicBlock::iterator &NextMBBI) {
-  assert(Width == 32 && "RV64 atomic expansion currently unsupported");
MachineInstr &MI = *MBBI;
DebugLoc DL = MI.getDebugLoc();
MachineFunction *MF = MBB.getParent();
@@ -483,18 +536,18 @@ bool RISCVExpandPseudo::expandAtomicCmpXchg(

if (!IsMasked) {
// .loophead:
-    // lr.w dest, (addr)
+    // lr.[w|d] dest, (addr)
// bne dest, cmpval, done
-    BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW32(Ordering)), DestReg)
+    BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW(Ordering, Width)), DestReg)
.addReg(AddrReg);
BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BNE))
.addReg(DestReg)
.addReg(CmpValReg)
.addMBB(DoneMBB);
// .looptail:
-    // sc.w scratch, newval, (addr)
+    // sc.[w|d] scratch, newval, (addr)
// bnez scratch, loophead
-    BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW32(Ordering)), ScratchReg)
+    BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW(Ordering, Width)), ScratchReg)
.addReg(AddrReg)
.addReg(NewValReg);
BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
@@ -507,7 +560,7 @@ bool RISCVExpandPseudo::expandAtomicCmpXchg(
// and scratch, dest, mask
// bne scratch, cmpval, done
unsigned MaskReg = MI.getOperand(5).getReg();
-    BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW32(Ordering)), DestReg)
+    BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW(Ordering, Width)), DestReg)
.addReg(AddrReg);
BuildMI(LoopHeadMBB, DL, TII->get(RISCV::AND), ScratchReg)
.addReg(DestReg)
@@ -525,7 +578,7 @@ bool RISCVExpandPseudo::expandAtomicCmpXchg(
// bnez scratch, loophead
insertMaskedMerge(TII, DL, LoopTailMBB, ScratchReg, DestReg, NewValReg,
MaskReg, ScratchReg);
-    BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW32(Ordering)), ScratchReg)
+    BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW(Ordering, Width)), ScratchReg)
.addReg(AddrReg)
.addReg(ScratchReg);
BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
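
The unmasked PseudoCmpXchg64 path above emits the classic LR/SC compare-and-swap loop. A semantics-only C++ model of what that loop computes (std::atomic again stands in for lr.d/sc.d; this sketch is not the emitted code):

#include <atomic>
#include <cstdint>

// Returns the value observed by lr.[w|d] (DestReg). The store happens only
// when that value equals CmpVal, mirroring the early bne to .done; SC
// failures are retried inside compare_exchange_strong.
uint64_t cmpXchgI64(std::atomic<uint64_t> &Addr, uint64_t CmpVal,
                    uint64_t NewVal) {
  uint64_t Dest = CmpVal;
  Addr.compare_exchange_strong(Dest, NewVal);
  return Dest;
}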
