[llvm] [RISCV] Merge int_riscv_masked_atomicrmw_*_i32/i64 intrinsics using llvm_anyint_ty. (PR #154845)
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Fri Aug 22 09:43:54 PDT 2025
https://github.com/topperc updated https://github.com/llvm/llvm-project/pull/154845
>From dcd170f4576a1236f24a9d5f0e7c0a9fdb594c26 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Thu, 21 Aug 2025 14:22:37 -0700
Subject: [PATCH 1/2] [RISCV] Merge int_riscv_masked_atomicrmw_*_i32/i64
intrinsics using llvm_anyint_ty.
I think having separate intrinsics for RV32 and RV64 makes some
things more complicated than using type overloading would.
This reduces the number of isel patterns in the .td file. They're
still expanded by HwMode, so it doesn't reduce the binary size.
getIntrinsicForMaskedAtomicRMWBinOp no longer needs to look at XLen.
This makes adding the i64 versions to getTgtMemIntrinsic in #154805
unnecessary.
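As a rough sketch of what the overloaded form looks like on the caller
side (variable names follow the diff below; this is illustrative, not
the exact final code), the width is now selected by the type list
passed when the declaration is created rather than by picking an _i32
or _i64 intrinsic ID:

  // Illustrative only: select the overload via the XLen integer type.
  unsigned XLen = Subtarget.getXLen();
  Type *Tys[] = {Builder.getIntNTy(XLen), AlignedAddr->getType()};
  Function *Decl = Intrinsic::getOrInsertDeclaration(
      AI->getModule(),
      getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);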
---
llvm/include/llvm/IR/IntrinsicsRISCV.td | 58 ++++------
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 111 ++++++++------------
llvm/lib/Target/RISCV/RISCVInstrInfoA.td | 57 +++-------
3 files changed, 80 insertions(+), 146 deletions(-)
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 80c5c7035c19b..f656d49f54992 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -15,8 +15,7 @@
// Atomic Intrinsics have multiple versions for different access widths, which
// all follow one of the following signatures (depending on how many arguments
-// they require). We carefully instantiate only specific versions of these for
-// specific integer widths, rather than using `llvm_anyint_ty`.
+// they require).
//
// In fact, as these intrinsics take `llvm_anyptr_ty`, the given names are the
// canonical names, and the intrinsics used in the code will have a name
@@ -25,52 +24,39 @@
let TargetPrefix = "riscv" in {
- // T @llvm.<name>.T.<p>(any*, T, T, T imm);
- class RISCVMaskedAtomicRMWFourArg<LLVMType itype>
- : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype],
+ // T @llvm.<name>.<i>.<p>(any*, T, T, T imm);
+ class RISCVMaskedAtomicRMWFourArg
+ : Intrinsic<[llvm_anyint_ty], [llvm_anyptr_ty, LLVMMatchType<0>,
+ LLVMMatchType<0>, LLVMMatchType<0>],
[IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<3>>]>;
- // T @llvm.<name>.T.<p>(any*, T, T, T, T imm);
- class RISCVMaskedAtomicRMWFiveArg<LLVMType itype>
- : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype, itype],
+ // T @llvm.<name>.<i>.<p>(any*, T, T, T, T imm);
+ class RISCVMaskedAtomicRMWFiveArg
+ : Intrinsic<[llvm_anyint_ty], [llvm_anyptr_ty, LLVMMatchType<0>,
+ LLVMMatchType<0>, LLVMMatchType<0>,
+ LLVMMatchType<0>],
[IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<4>>]>;
- // We define 32-bit and 64-bit variants of the above, where T stands for i32
- // or i64 respectively:
- multiclass RISCVMaskedAtomicRMWFourArgIntrinsics {
- // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32 imm);
- def _i32 : RISCVMaskedAtomicRMWFourArg<llvm_i32_ty>;
- // i64 @llvm.<name>.i32.<p>(any*, i64, i64, i64 imm);
- def _i64 : RISCVMaskedAtomicRMWFourArg<llvm_i64_ty>;
- }
-
- multiclass RISCVMaskedAtomicRMWFiveArgIntrinsics {
- // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32, i32 imm);
- def _i32 : RISCVMaskedAtomicRMWFiveArg<llvm_i32_ty>;
- // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64, i64 imm);
- def _i64 : RISCVMaskedAtomicRMWFiveArg<llvm_i64_ty>;
- }
-
// These intrinsics are intended only for internal compiler use (i.e. as
// part of AtomicExpandpass via the emitMaskedAtomic*Intrinsic hooks). Their
// names and semantics could change in the future.
- // @llvm.riscv.masked.atomicrmw.*.{i32,i64}.<p>(
+ // @llvm.riscv.masked.atomicrmw.*.<i>.<p>(
// ptr addr, ixlen oparg, ixlen mask, ixlenimm ordering)
- defm int_riscv_masked_atomicrmw_xchg : RISCVMaskedAtomicRMWFourArgIntrinsics;
- defm int_riscv_masked_atomicrmw_add : RISCVMaskedAtomicRMWFourArgIntrinsics;
- defm int_riscv_masked_atomicrmw_sub : RISCVMaskedAtomicRMWFourArgIntrinsics;
- defm int_riscv_masked_atomicrmw_nand : RISCVMaskedAtomicRMWFourArgIntrinsics;
- defm int_riscv_masked_atomicrmw_umax : RISCVMaskedAtomicRMWFourArgIntrinsics;
- defm int_riscv_masked_atomicrmw_umin : RISCVMaskedAtomicRMWFourArgIntrinsics;
+ def int_riscv_masked_atomicrmw_xchg : RISCVMaskedAtomicRMWFourArg;
+ def int_riscv_masked_atomicrmw_add : RISCVMaskedAtomicRMWFourArg;
+ def int_riscv_masked_atomicrmw_sub : RISCVMaskedAtomicRMWFourArg;
+ def int_riscv_masked_atomicrmw_nand : RISCVMaskedAtomicRMWFourArg;
+ def int_riscv_masked_atomicrmw_umax : RISCVMaskedAtomicRMWFourArg;
+ def int_riscv_masked_atomicrmw_umin : RISCVMaskedAtomicRMWFourArg;
// Signed min and max need an extra operand to do sign extension with.
- // @llvm.riscv.masked.atomicrmw.{max,min}.{i32,i64}.<p>(
+ // @llvm.riscv.masked.atomicrmw.{max,min}.<i>.<p>(
// ptr addr, ixlen oparg, ixlen mask, ixlen shamt, ixlenimm ordering)
- defm int_riscv_masked_atomicrmw_max : RISCVMaskedAtomicRMWFiveArgIntrinsics;
- defm int_riscv_masked_atomicrmw_min : RISCVMaskedAtomicRMWFiveArgIntrinsics;
+ def int_riscv_masked_atomicrmw_max : RISCVMaskedAtomicRMWFiveArg;
+ def int_riscv_masked_atomicrmw_min : RISCVMaskedAtomicRMWFiveArg;
- // @llvm.riscv.masked.cmpxchg.{i32,i64}.<p>(
+ // @llvm.riscv.masked.cmpxchg.<i>.<p>(
// ptr addr, ixlen cmpval, ixlen newval, ixlen mask, ixlenimm ordering)
- defm int_riscv_masked_cmpxchg : RISCVMaskedAtomicRMWFiveArgIntrinsics;
+ def int_riscv_masked_cmpxchg : RISCVMaskedAtomicRMWFiveArg;
} // TargetPrefix = "riscv"
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 4a1db80076530..60dfd7b177ac9 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1800,15 +1800,15 @@ bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
switch (Intrinsic) {
default:
return false;
- case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
- case Intrinsic::riscv_masked_atomicrmw_add_i32:
- case Intrinsic::riscv_masked_atomicrmw_sub_i32:
- case Intrinsic::riscv_masked_atomicrmw_nand_i32:
- case Intrinsic::riscv_masked_atomicrmw_max_i32:
- case Intrinsic::riscv_masked_atomicrmw_min_i32:
- case Intrinsic::riscv_masked_atomicrmw_umax_i32:
- case Intrinsic::riscv_masked_atomicrmw_umin_i32:
- case Intrinsic::riscv_masked_cmpxchg_i32:
+ case Intrinsic::riscv_masked_atomicrmw_xchg:
+ case Intrinsic::riscv_masked_atomicrmw_add:
+ case Intrinsic::riscv_masked_atomicrmw_sub:
+ case Intrinsic::riscv_masked_atomicrmw_nand:
+ case Intrinsic::riscv_masked_atomicrmw_max:
+ case Intrinsic::riscv_masked_atomicrmw_min:
+ case Intrinsic::riscv_masked_atomicrmw_umax:
+ case Intrinsic::riscv_masked_atomicrmw_umin:
+ case Intrinsic::riscv_masked_cmpxchg:
Info.opc = ISD::INTRINSIC_W_CHAIN;
Info.memVT = MVT::i32;
Info.ptrVal = I.getArgOperand(0);
@@ -21478,24 +21478,23 @@ unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
switch (IntNo) {
default:
break;
- case Intrinsic::riscv_masked_atomicrmw_xchg_i64:
- case Intrinsic::riscv_masked_atomicrmw_add_i64:
- case Intrinsic::riscv_masked_atomicrmw_sub_i64:
- case Intrinsic::riscv_masked_atomicrmw_nand_i64:
- case Intrinsic::riscv_masked_atomicrmw_max_i64:
- case Intrinsic::riscv_masked_atomicrmw_min_i64:
- case Intrinsic::riscv_masked_atomicrmw_umax_i64:
- case Intrinsic::riscv_masked_atomicrmw_umin_i64:
- case Intrinsic::riscv_masked_cmpxchg_i64:
+ case Intrinsic::riscv_masked_atomicrmw_xchg:
+ case Intrinsic::riscv_masked_atomicrmw_add:
+ case Intrinsic::riscv_masked_atomicrmw_sub:
+ case Intrinsic::riscv_masked_atomicrmw_nand:
+ case Intrinsic::riscv_masked_atomicrmw_max:
+ case Intrinsic::riscv_masked_atomicrmw_min:
+ case Intrinsic::riscv_masked_atomicrmw_umax:
+ case Intrinsic::riscv_masked_atomicrmw_umin:
+ case Intrinsic::riscv_masked_cmpxchg:
// riscv_masked_{atomicrmw_*,cmpxchg} intrinsics represent an emulated
// narrow atomic operation. These are implemented using atomic
// operations at the minimum supported atomicrmw/cmpxchg width whose
// result is then sign extended to XLEN. With +A, the minimum width is
// 32 for both 64 and 32.
- assert(Subtarget.getXLen() == 64);
assert(getMinCmpXchgSizeInBits() == 32);
assert(Subtarget.hasStdExtA());
- return 33;
+ return Op.getValueSizeInBits() - 31;
}
break;
}
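In other words, the intrinsic produces a 32-bit result sign-extended to
XLen, so XLen - 31 bits are known copies of the sign bit; the old
hard-coded 33 was just the RV64 case. A hypothetical helper, only to
spell out the arithmetic behind Op.getValueSizeInBits() - 31 (not part
of the patch):

  // Known sign bits of a 32-bit payload sign-extended to XLen bits.
  static unsigned signBitsOfSExtWord(unsigned XLen) {
    // Everything above bit 31 replicates the sign bit.
    return XLen - 31; // 33 on RV64, 1 (the trivial minimum) on RV32
  }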
@@ -23786,53 +23785,26 @@ RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
static Intrinsic::ID
getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
- if (XLen == 32) {
- switch (BinOp) {
- default:
- llvm_unreachable("Unexpected AtomicRMW BinOp");
- case AtomicRMWInst::Xchg:
- return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
- case AtomicRMWInst::Add:
- return Intrinsic::riscv_masked_atomicrmw_add_i32;
- case AtomicRMWInst::Sub:
- return Intrinsic::riscv_masked_atomicrmw_sub_i32;
- case AtomicRMWInst::Nand:
- return Intrinsic::riscv_masked_atomicrmw_nand_i32;
- case AtomicRMWInst::Max:
- return Intrinsic::riscv_masked_atomicrmw_max_i32;
- case AtomicRMWInst::Min:
- return Intrinsic::riscv_masked_atomicrmw_min_i32;
- case AtomicRMWInst::UMax:
- return Intrinsic::riscv_masked_atomicrmw_umax_i32;
- case AtomicRMWInst::UMin:
- return Intrinsic::riscv_masked_atomicrmw_umin_i32;
- }
- }
-
- if (XLen == 64) {
- switch (BinOp) {
- default:
- llvm_unreachable("Unexpected AtomicRMW BinOp");
- case AtomicRMWInst::Xchg:
- return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
- case AtomicRMWInst::Add:
- return Intrinsic::riscv_masked_atomicrmw_add_i64;
- case AtomicRMWInst::Sub:
- return Intrinsic::riscv_masked_atomicrmw_sub_i64;
- case AtomicRMWInst::Nand:
- return Intrinsic::riscv_masked_atomicrmw_nand_i64;
- case AtomicRMWInst::Max:
- return Intrinsic::riscv_masked_atomicrmw_max_i64;
- case AtomicRMWInst::Min:
- return Intrinsic::riscv_masked_atomicrmw_min_i64;
- case AtomicRMWInst::UMax:
- return Intrinsic::riscv_masked_atomicrmw_umax_i64;
- case AtomicRMWInst::UMin:
- return Intrinsic::riscv_masked_atomicrmw_umin_i64;
- }
+ switch (BinOp) {
+ default:
+ llvm_unreachable("Unexpected AtomicRMW BinOp");
+ case AtomicRMWInst::Xchg:
+ return Intrinsic::riscv_masked_atomicrmw_xchg;
+ case AtomicRMWInst::Add:
+ return Intrinsic::riscv_masked_atomicrmw_add;
+ case AtomicRMWInst::Sub:
+ return Intrinsic::riscv_masked_atomicrmw_sub;
+ case AtomicRMWInst::Nand:
+ return Intrinsic::riscv_masked_atomicrmw_nand;
+ case AtomicRMWInst::Max:
+ return Intrinsic::riscv_masked_atomicrmw_max;
+ case AtomicRMWInst::Min:
+ return Intrinsic::riscv_masked_atomicrmw_min;
+ case AtomicRMWInst::UMax:
+ return Intrinsic::riscv_masked_atomicrmw_umax;
+ case AtomicRMWInst::UMin:
+ return Intrinsic::riscv_masked_atomicrmw_umin;
}
-
- llvm_unreachable("Unexpected XLen\n");
}
Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
@@ -23857,7 +23829,7 @@ Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
unsigned XLen = Subtarget.getXLen();
Value *Ordering =
Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
- Type *Tys[] = {AlignedAddr->getType()};
+ Type *Tys[] = {Builder.getIntNTy(XLen), AlignedAddr->getType()};
Function *LrwOpScwLoop = Intrinsic::getOrInsertDeclaration(
AI->getModule(),
getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
@@ -23913,14 +23885,13 @@ Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
unsigned XLen = Subtarget.getXLen();
Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
- Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
+ Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg;
if (XLen == 64) {
CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
- CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
}
- Type *Tys[] = {AlignedAddr->getType()};
+ Type *Tys[] = {Builder.getIntNTy(XLen), AlignedAddr->getType()};
Value *Result = Builder.CreateIntrinsic(
CmpXchgIntrID, Tys, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
if (XLen == 64)
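Since the integer overload is listed first, the mangled names should
keep the form llvm.riscv.masked.cmpxchg.<ixlen>.<p>, so the i64
instantiation presumably still prints the same way the old _i64
intrinsic did. A condensed sketch of the emission above, with the same
variable names as the diff:

  // Sketch: one intrinsic ID; the width comes from the overloaded type list.
  Type *Tys[] = {Builder.getIntNTy(XLen), AlignedAddr->getType()};
  Value *Result = Builder.CreateIntrinsic(
      Intrinsic::riscv_masked_cmpxchg, Tys,
      {AlignedAddr, CmpVal, NewVal, Mask, Ordering});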
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoA.td b/llvm/lib/Target/RISCV/RISCVInstrInfoA.td
index e307f1a595880..645dbe896c6fd 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoA.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoA.td
@@ -278,12 +278,14 @@ class PseudoMaskedAMOUMinUMax
}
class PseudoMaskedAMOPat<Intrinsic intrin, Pseudo AMOInst>
- : Pat<(intrin GPR:$addr, GPR:$incr, GPR:$mask, timm:$ordering),
+ : Pat<(XLenVT (intrin (XLenVT GPR:$addr), (XLenVT GPR:$incr),
+ (XLenVT GPR:$mask), (XLenVT timm:$ordering))),
(AMOInst GPR:$addr, GPR:$incr, GPR:$mask, timm:$ordering)>;
class PseudoMaskedAMOMinMaxPat<Intrinsic intrin, Pseudo AMOInst>
- : Pat<(intrin GPR:$addr, GPR:$incr, GPR:$mask, GPR:$shiftamt,
- timm:$ordering),
+ : Pat<(XLenVT (intrin (XLenVT GPR:$addr), (XLenVT GPR:$incr),
+ (XLenVT GPR:$mask), (XLenVT GPR:$shiftamt),
+ (XLenVT timm:$ordering))),
(AMOInst GPR:$addr, GPR:$incr, GPR:$mask, GPR:$shiftamt,
timm:$ordering)>;
@@ -320,26 +322,24 @@ let Size = 36 in {
def PseudoMaskedAtomicLoadUMax32 : PseudoMaskedAMOUMinUMax;
def PseudoMaskedAtomicLoadUMin32 : PseudoMaskedAMOUMinUMax;
}
-} // Predicates = [HasStdExtA]
-let Predicates = [HasStdExtA, IsRV32] in {
-def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_xchg_i32,
+def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_xchg,
PseudoMaskedAtomicSwap32>;
-def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_add_i32,
+def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_add,
PseudoMaskedAtomicLoadAdd32>;
-def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_sub_i32,
+def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_sub,
PseudoMaskedAtomicLoadSub32>;
-def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_nand_i32,
+def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_nand,
PseudoMaskedAtomicLoadNand32>;
-def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_max_i32,
+def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_max,
PseudoMaskedAtomicLoadMax32>;
-def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_min_i32,
+def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_min,
PseudoMaskedAtomicLoadMin32>;
-def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umax_i32,
+def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umax,
PseudoMaskedAtomicLoadUMax32>;
-def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umin_i32,
+def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umin,
PseudoMaskedAtomicLoadUMin32>;
-} // Predicates = [HasStdExtA, IsRV32]
+} // Predicates = [HasStdExtA]
let Predicates = [HasStdExtA, IsRV64] in {
@@ -357,23 +357,6 @@ def : Pat<(i64 (atomic_load_nand_i64_acq_rel GPR:$addr, GPR:$incr)),
(PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 6)>;
def : Pat<(i64 (atomic_load_nand_i64_seq_cst GPR:$addr, GPR:$incr)),
(PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 7)>;
-
-def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_xchg_i64,
- PseudoMaskedAtomicSwap32>;
-def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_add_i64,
- PseudoMaskedAtomicLoadAdd32>;
-def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_sub_i64,
- PseudoMaskedAtomicLoadSub32>;
-def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_nand_i64,
- PseudoMaskedAtomicLoadNand32>;
-def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_max_i64,
- PseudoMaskedAtomicLoadMax32>;
-def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_min_i64,
- PseudoMaskedAtomicLoadMin32>;
-def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umax_i64,
- PseudoMaskedAtomicLoadUMax32>;
-def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umin_i64,
- PseudoMaskedAtomicLoadUMin32>;
} // Predicates = [HasStdExtA, IsRV64]
@@ -427,15 +410,9 @@ def PseudoMaskedCmpXchg32
let Size = 32;
}
-def : Pat<(int_riscv_masked_cmpxchg_i32
- GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering),
+def : Pat<(XLenVT (int_riscv_masked_cmpxchg
+ (XLenVT GPR:$addr), (XLenVT GPR:$cmpval), (XLenVT GPR:$newval),
+ (XLenVT GPR:$mask), (XLenVT timm:$ordering))),
(PseudoMaskedCmpXchg32
GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering)>;
} // Predicates = [HasStdExtA]
-
-let Predicates = [HasStdExtA, IsRV64] in {
-def : Pat<(int_riscv_masked_cmpxchg_i64
- GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering),
- (PseudoMaskedCmpXchg32
- GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering)>;
-} // Predicates = [HasStdExtA, IsRV64]
>From 8152d7908216e33c1501ca78605a7c0b6e6e69a5 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Thu, 21 Aug 2025 21:50:24 -0700
Subject: [PATCH 2/2] fixup! Add comment
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 60dfd7b177ac9..7d4dd148219fb 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1809,6 +1809,11 @@ bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
case Intrinsic::riscv_masked_atomicrmw_umax:
case Intrinsic::riscv_masked_atomicrmw_umin:
case Intrinsic::riscv_masked_cmpxchg:
+ // riscv_masked_{atomicrmw_*,cmpxchg} intrinsics represent an emulated
+ // narrow atomic operation. These will be expanded to an LR/SC loop that
+ // reads/writes to/from an aligned 4 byte location. And, or, shift, etc.
+ // will be used to modify the appropriate part of the 4 byte data and
+ // preserve the rest.
Info.opc = ISD::INTRINSIC_W_CHAIN;
Info.memVT = MVT::i32;
Info.ptrVal = I.getArgOperand(0);
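For readers unfamiliar with the expansion that comment refers to, here
is a loose stand-in in plain C++: a CAS loop stands in for the lr.w/sc.w
loop the backend actually emits, and the helper name is made up. The
operand is assumed to be pre-shifted into position, as it is when
AtomicExpand calls these intrinsics.

  #include <atomic>
  #include <cstdint>

  // Illustrative only: a masked sub-word add performed through the
  // containing aligned 32-bit word. Real codegen uses lr.w/sc.w.
  uint32_t maskedAtomicAdd(std::atomic<uint32_t> &Word,
                           uint32_t ShiftedIncr, uint32_t Mask) {
    uint32_t Old = Word.load();
    uint32_t Desired;
    do {
      uint32_t Updated = Old + ShiftedIncr;        // operand already in place
      Desired = (Old & ~Mask) | (Updated & Mask);  // keep bits outside the mask
    } while (!Word.compare_exchange_weak(Old, Desired));
    return Old; // old value of the whole word; the caller extracts the field
  }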