[PATCH] D130191: [RISCV] Teach ComputeNumSignBitsForTargetNode about masked atomic intrinsics

Alex Bradbury via Phabricator via llvm-commits llvm-commits at lists.llvm.org
Wed Aug 3 05:39:02 PDT 2022


asb updated this revision to Diff 449642.
asb marked an inline comment as done.
asb added a comment.

Address review comments.


CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D130191/new/

https://reviews.llvm.org/D130191

Files:
  llvm/lib/Target/RISCV/RISCVISelLowering.cpp
  llvm/test/CodeGen/RISCV/atomic-cmpxchg-branch-on-result.ll
  llvm/test/CodeGen/RISCV/atomic-signext.ll


Index: llvm/test/CodeGen/RISCV/atomic-signext.ll
===================================================================
--- llvm/test/CodeGen/RISCV/atomic-signext.ll
+++ llvm/test/CodeGen/RISCV/atomic-signext.ll
@@ -3904,7 +3904,6 @@
 ; RV64IA-NEXT:    bnez a5, .LBB48_1
 ; RV64IA-NEXT:  .LBB48_3:
 ; RV64IA-NEXT:    and a0, a2, a4
-; RV64IA-NEXT:    sext.w a0, a0
 ; RV64IA-NEXT:    xor a0, a1, a0
 ; RV64IA-NEXT:    seqz a0, a0
 ; RV64IA-NEXT:    ret
@@ -4077,7 +4076,6 @@
 ; RV64IA-NEXT:    bnez a4, .LBB50_1
 ; RV64IA-NEXT:  .LBB50_3:
 ; RV64IA-NEXT:    and a0, a2, a5
-; RV64IA-NEXT:    sext.w a0, a0
 ; RV64IA-NEXT:    xor a0, a1, a0
 ; RV64IA-NEXT:    seqz a0, a0
 ; RV64IA-NEXT:    ret
Index: llvm/test/CodeGen/RISCV/atomic-cmpxchg-branch-on-result.ll
===================================================================
--- llvm/test/CodeGen/RISCV/atomic-cmpxchg-branch-on-result.ll
+++ llvm/test/CodeGen/RISCV/atomic-cmpxchg-branch-on-result.ll
@@ -130,7 +130,6 @@
 ; RV64IA-NEXT:  .LBB2_5: # %do_cmpxchg
 ; RV64IA-NEXT:    # in Loop: Header=BB2_1 Depth=1
 ; RV64IA-NEXT:    and a4, a4, a0
-; RV64IA-NEXT:    sext.w a4, a4
 ; RV64IA-NEXT:    bne a1, a4, .LBB2_1
 ; RV64IA-NEXT:  # %bb.2: # %exit
 ; RV64IA-NEXT:    ret
@@ -207,7 +206,6 @@
 ; RV64IA-NEXT:  .LBB3_5: # %do_cmpxchg
 ; RV64IA-NEXT:    # in Loop: Header=BB3_1 Depth=1
 ; RV64IA-NEXT:    and a4, a4, a0
-; RV64IA-NEXT:    sext.w a4, a4
 ; RV64IA-NEXT:    beq a1, a4, .LBB3_1
 ; RV64IA-NEXT:  # %bb.2: # %exit
 ; RV64IA-NEXT:    ret
Index: llvm/lib/Target/RISCV/RISCVISelLowering.cpp
===================================================================
--- llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -9882,6 +9882,31 @@
       return XLen - EltBits + 1;
     break;
   }
+  case ISD::INTRINSIC_W_CHAIN: {
+    unsigned IntNo = Op.getConstantOperandVal(1);
+    switch (IntNo) {
+    default:
+      break;
+    case Intrinsic::riscv_masked_atomicrmw_xchg_i64:
+    case Intrinsic::riscv_masked_atomicrmw_add_i64:
+    case Intrinsic::riscv_masked_atomicrmw_sub_i64:
+    case Intrinsic::riscv_masked_atomicrmw_nand_i64:
+    case Intrinsic::riscv_masked_atomicrmw_max_i64:
+    case Intrinsic::riscv_masked_atomicrmw_min_i64:
+    case Intrinsic::riscv_masked_atomicrmw_umax_i64:
+    case Intrinsic::riscv_masked_atomicrmw_umin_i64:
+    case Intrinsic::riscv_masked_cmpxchg_i64:
+      // riscv_masked_{atomicrmw_*,cmpxchg} intrinsics represent an emulated
+      // narrow atomic operation. These are implemented using atomic
+      // operations at the minimum supported atomicrmw/cmpxchg width whose
+      // result is then sign extended to XLEN. With +A, the minimum width is
+      // 32 for both RV64 and RV32.
+      assert(Subtarget.getXLen() == 64);
+      assert(getMinCmpXchgSizeInBits() == 32);
+      assert(Subtarget.hasStdExtA());
+      return 33;
+    }
+  }
   }
 
   return 1;

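For reference, here is a minimal IR sketch of the kind of pattern that benefits
from this change (the function name is illustrative, not one of the functions
in the affected tests):

  define i1 @cmpxchg_i8_success(ptr %p, i8 %cmp, i8 %val) nounwind {
    ; On RV64 with +A, this i8 cmpxchg is expanded to a call to
    ; llvm.riscv.masked.cmpxchg.i64, which is ultimately lowered to an
    ; LR.W/SC.W loop and so yields a result sign-extended from bit 31,
    ; i.e. at least 33 sign bits.
    %res = cmpxchg ptr %p, i8 %cmp, i8 %val monotonic monotonic
    %success = extractvalue { i8, i1 } %res, 1
    ret i1 %success
  }

Once ComputeNumSignBitsForTargetNode reports 33 sign bits for the masked
intrinsic result, the sign_extend_inreg (emitted as sext.w) that previously
preceded the comparison of the extracted old value against the expected value
can be folded away, which is what the removed sext.w lines in
atomic-signext.ll and atomic-cmpxchg-branch-on-result.ll show.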
