[llvm] [AArch64][SVE2] Do not emit RSHRNB for large shifts (PR #66672)

via llvm-commits llvm-commits at lists.llvm.org
Mon Sep 18 10:12:36 PDT 2023


llvmbot wrote:



@llvm/pr-subscribers-backend-aarch64

<details>
<summary>Changes</summary>

RSHRNB's shift amount operand must be between 1 and the destination element size in bits. This patch stops RSHRNB ISD nodes from being emitted when the shift amount falls outside that range.
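
For illustration, the legality rule is just a range check on the immediate. Below is a minimal standalone sketch of that rule; `isLegalRshrnbShift` is a hypothetical helper, not part of the patch, which inlines only the upper-bound comparison against `ResVT.getScalarSizeInBits()` (see the diff below):

```cpp
#include <cstdio>

// Hypothetical helper modelling the rule described above: RSHRNB's
// immediate must lie in [1, destination element size in bits].
static bool isLegalRshrnbShift(unsigned Shift, unsigned DstEltBits) {
  return Shift >= 1 && Shift <= DstEltBits;
}

int main() {
  // Narrowing i64 elements to i32 elements: destination width is 32 bits.
  std::printf("shift 32: %d\n", isLegalRshrnbShift(32, 32)); // 1 -> combine fires
  std::printf("shift 48: %d\n", isLegalRshrnbShift(48, 32)); // 0 -> must bail out
  return 0;
}
```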

---
Full diff: https://github.com/llvm/llvm-project/pull/66672.diff


2 Files Affected:

- (modified) llvm/lib/Target/AArch64/AArch64ISelLowering.cpp (+3) 
- (modified) llvm/test/CodeGen/AArch64/sve2-intrinsics-combine-rshrnb.ll (+46) 


``````````diff
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 5cc001c44e7a24f..4ef97e682b7bf64 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -20241,6 +20241,9 @@ static SDValue trySimplifySrlAddToRshrnb(SDValue Srl, SelectionDAG &DAG,
     return SDValue();
   unsigned ShiftValue = SrlOp1->getZExtValue();
 
+  if (ShiftValue > ResVT.getScalarSizeInBits())
+    return SDValue();
+
   SDValue Add = Srl->getOperand(0);
   if (Add->getOpcode() != ISD::ADD || !Add->hasOneUse())
     return SDValue();
diff --git a/llvm/test/CodeGen/AArch64/sve2-intrinsics-combine-rshrnb.ll b/llvm/test/CodeGen/AArch64/sve2-intrinsics-combine-rshrnb.ll
index f94daa45fb82a63..fe86a94e3035787 100644
--- a/llvm/test/CodeGen/AArch64/sve2-intrinsics-combine-rshrnb.ll
+++ b/llvm/test/CodeGen/AArch64/sve2-intrinsics-combine-rshrnb.ll
@@ -142,6 +142,52 @@ define void @wide_add_shift_add_rshrnb_h(ptr %dest, i64 %index, <vscale x 8 x i3
   ret void
 }
 
+define void @wide_add_shift_add_rshrnb_d(ptr %dest, i64 %index, <vscale x 4 x i64> %arg1){
+; CHECK-LABEL: wide_add_shift_add_rshrnb_d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    rshrnb z1.s, z1.d, #32
+; CHECK-NEXT:    rshrnb z0.s, z0.d, #32
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z1.s
+; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x0, x1, lsl #2]
+; CHECK-NEXT:    add z0.s, z1.s, z0.s
+; CHECK-NEXT:    st1w { z0.s }, p0, [x0, x1, lsl #2]
+; CHECK-NEXT:    ret
+  %1 = add <vscale x 4 x i64> %arg1, shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 2147483648, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
+  %2 = lshr <vscale x 4 x i64> %1, shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 32, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
+  %3 = getelementptr inbounds i32, ptr %dest, i64 %index
+  %load = load <vscale x 4 x i32>, ptr %3, align 4
+  %4 = trunc <vscale x 4 x i64> %2 to <vscale x 4 x i32>
+  %5 = add <vscale x 4 x i32> %load, %4
+  store <vscale x 4 x i32> %5, ptr %3, align 4
+  ret void
+}
+
+; Do not emit rshrnb if the shift amount is larger than the dest eltsize in bits
+define void @neg_wide_add_shift_add_rshrnb_d(ptr %dest, i64 %index, <vscale x 4 x i64> %arg1){
+; CHECK-LABEL: neg_wide_add_shift_add_rshrnb_d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z2.d, #0x800000000000
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    add z0.d, z0.d, z2.d
+; CHECK-NEXT:    add z1.d, z1.d, z2.d
+; CHECK-NEXT:    lsr z1.d, z1.d, #48
+; CHECK-NEXT:    lsr z0.d, z0.d, #48
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z1.s
+; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x0, x1, lsl #2]
+; CHECK-NEXT:    add z0.s, z1.s, z0.s
+; CHECK-NEXT:    st1w { z0.s }, p0, [x0, x1, lsl #2]
+; CHECK-NEXT:    ret
+  %1 = add <vscale x 4 x i64> %arg1, shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 140737488355328, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
+  %2 = lshr <vscale x 4 x i64> %1, shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 48, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
+  %3 = getelementptr inbounds i32, ptr %dest, i64 %index
+  %load = load <vscale x 4 x i32>, ptr %3, align 4
+  %4 = trunc <vscale x 4 x i64> %2 to <vscale x 4 x i32>
+  %5 = add <vscale x 4 x i32> %load, %4
+  store <vscale x 4 x i32> %5, ptr %3, align 4
+  ret void
+}
+
 define void @neg_trunc_lsr_add_op1_not_splat(ptr %ptr, ptr %dst, i64 %index, <vscale x 8 x i16> %add_op1){
 ; CHECK-LABEL: neg_trunc_lsr_add_op1_not_splat:
 ; CHECK:       // %bb.0:

``````````
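
The splat constants in the new tests encode the rounding addend the combine matches: a rounding right shift by S adds 1 << (S-1) before shifting. A quick standalone check of the values used above (an illustration only, not part of the patch):

```cpp
#include <cassert>
#include <cstdint>

int main() {
  // wide_add_shift_add_rshrnb_d: shift 32, splat addend 2147483648 = 1 << 31.
  assert((uint64_t{1} << 31) == 2147483648ULL);
  // neg_wide_add_shift_add_rshrnb_d: shift 48, splat addend
  // 140737488355328 = 1 << 47; 48 exceeds the 32-bit destination
  // element width, so RSHRNB cannot be used.
  assert((uint64_t{1} << 47) == 140737488355328ULL);
  return 0;
}
```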

</details>


https://github.com/llvm/llvm-project/pull/66672

