[llvm] ddd9485 - [MVE] Don't distribute add of vecreduce if it has more than one use

John Brawn via llvm-commits llvm-commits at lists.llvm.org
Mon Jul 11 06:13:37 PDT 2022


Author: John Brawn
Date: 2022-07-11T14:13:29+01:00
New Revision: ddd9485129b53a1ca53ebe06f59dd3c7fe2dc333

URL: https://github.com/llvm/llvm-project/commit/ddd9485129b53a1ca53ebe06f59dd3c7fe2dc333
DIFF: https://github.com/llvm/llvm-project/commit/ddd9485129b53a1ca53ebe06f59dd3c7fe2dc333.diff

LOG: [MVE] Don't distribute add of vecreduce if it has more than one use

If the add has more than one use, then applying the transformation
won't cause the add to be removed, so we can end up applying it
again, causing an infinite loop.
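
For illustration, a minimal IR sketch of the trigger shape, distilled
from test1 in the test file added below (the @sketch name is just
illustrative):

  ; %add1 has two uses (the store and %add2), so distributing %add2
  ; cannot remove %add1; without a one-use check the combine can keep
  ; re-matching the nodes it creates.
  define i32 @sketch(ptr %ptr, i32 %arg1, <4 x i32> %a, <4 x i32> %b) {
    %r1 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %a)
    %r2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %b)
    %add1 = add i32 %r1, %r2
    store i32 %add1, ptr %ptr, align 4
    %add2 = add i32 %add1, %arg1
    ret i32 %add2
  }
  declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)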

Differential Revision: https://reviews.llvm.org/D129361

Added: 
    llvm/test/CodeGen/Thumb2/mve-vecreduce-add-combine.ll

Modified: 
    llvm/lib/Target/ARM/ARMISelLowering.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index b16e0d94bf98..e6be93e6480a 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -13358,14 +13358,14 @@ static SDValue TryDistrubutionADDVecReduce(SDNode *N, SelectionDAG &DAG) {
     // to make better use of vaddva style instructions.
     if (VT == MVT::i32 && N1.getOpcode() == ISD::ADD && !IsVecReduce(N0) &&
         IsVecReduce(N1.getOperand(0)) && IsVecReduce(N1.getOperand(1)) &&
-        !isa<ConstantSDNode>(N0)) {
+        !isa<ConstantSDNode>(N0) && N1->hasOneUse()) {
       SDValue Add0 = DAG.getNode(ISD::ADD, dl, VT, N0, N1.getOperand(0));
       return DAG.getNode(ISD::ADD, dl, VT, Add0, N1.getOperand(1));
     }
     // And turn add(add(A, reduce(B)), add(C, reduce(D))) ->
     //   add(add(add(A, C), reduce(B)), reduce(D))
     if (VT == MVT::i32 && N0.getOpcode() == ISD::ADD &&
-        N1.getOpcode() == ISD::ADD) {
+        N1.getOpcode() == ISD::ADD && N0->hasOneUse() && N1->hasOneUse()) {
       unsigned N0RedOp = 0;
       if (!IsVecReduce(N0.getOperand(N0RedOp))) {
         N0RedOp = 1;
@@ -13432,7 +13432,7 @@ static SDValue TryDistrubutionADDVecReduce(SDNode *N, SelectionDAG &DAG) {
     };
 
     SDValue X;
-    if (N0.getOpcode() == ISD::ADD) {
+    if (N0.getOpcode() == ISD::ADD && N0->hasOneUse()) {
       if (IsVecReduce(N0.getOperand(0)) && IsVecReduce(N0.getOperand(1))) {
         int IsBefore = IsKnownOrderedLoad(N0.getOperand(0).getOperand(0),
                                          N0.getOperand(1).getOperand(0));
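
As a concrete instance of the second pattern guarded above, here is a
sketch of the positive case (like test3 below, but without the store
that gives %add1 a second use there; names are illustrative). With
every inner add single-use, rewriting
add(add(A, reduce(B)), add(C, reduce(D))) into
add(add(add(A, C), reduce(B)), reduce(D)) replaces the original adds
outright instead of duplicating them next to live originals:

  define i32 @distribute_sketch(i32 %A, i32 %C, <4 x i32> %B, <4 x i32> %D) {
    %rB = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %B)
    %rD = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %D)
    %add0 = add i32 %A, %rB      ; single use: safe to distribute
    %add1 = add i32 %C, %rD      ; single use: safe to distribute
    %add2 = add i32 %add0, %add1
    ret i32 %add2
  }
  declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)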

diff --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-add-combine.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-add-combine.ll
new file mode 100644
index 000000000000..16abf166c2c2
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-add-combine.ll
@@ -0,0 +1,103 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK
+
+define arm_aapcs_vfpcc i32 @test1(ptr %ptr, i32 %arg1, <4 x i32> %arg2, <4 x i32> %arg3) {
+; CHECK-LABEL: test1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vaddv.u32 r2, q1
+; CHECK-NEXT:    vaddva.u32 r2, q0
+; CHECK-NEXT:    str r2, [r0]
+; CHECK-NEXT:    adds r0, r2, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %reduce1 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %arg2)
+  %reduce2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %arg3)
+  %add1 = add i32 %reduce1, %reduce2
+  store i32 %add1, i32* %ptr, align 4
+  %add2 = add i32 %add1, %arg1
+  ret i32 %add2
+}
+
+define arm_aapcs_vfpcc i32 @test2(ptr %ptr, i32 %arg1, <4 x i32> %arg2, <4 x i32> %arg3) {
+; CHECK-LABEL: test2:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vaddv.u32 r2, q1
+; CHECK-NEXT:    vaddva.u32 r2, q0
+; CHECK-NEXT:    str r2, [r0]
+; CHECK-NEXT:    adds r0, r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %reduce1 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %arg2)
+  %reduce2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %arg3)
+  %add1 = add i32 %reduce1, %reduce2
+  store i32 %add1, i32* %ptr, align 4
+  %add2 = add i32 %arg1, %add1
+  ret i32 %add2
+}
+
+define arm_aapcs_vfpcc i32 @test3(ptr %ptr, i32 %arg1, i32 %arg2, <4 x i32> %arg3, <4 x i32> %arg4) {
+; CHECK-LABEL: test3:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mov r12, r1
+; CHECK-NEXT:    vaddva.u32 r2, q1
+; CHECK-NEXT:    vaddva.u32 r12, q0
+; CHECK-NEXT:    str.w r12, [r0]
+; CHECK-NEXT:    add.w r0, r12, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %reduce1 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %arg3)
+  %reduce2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %arg4)
+  %add1 = add i32 %arg1, %reduce1
+  store i32 %add1, i32* %ptr, align 4
+  %add2 = add i32 %arg2, %reduce2
+  %add3 = add i32 %add1, %add2
+  ret i32 %add3
+}
+
+define arm_aapcs_vfpcc i32 @test4(ptr %ptr, i32 %arg1, ptr %arg2) {
+; CHECK-LABEL: test4:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrw.u32 q0, [r2]
+; CHECK-NEXT:    mov r12, r1
+; CHECK-NEXT:    vaddva.u32 r12, q0
+; CHECK-NEXT:    vldrw.u32 q0, [r2, #4]
+; CHECK-NEXT:    str.w r12, [r0]
+; CHECK-NEXT:    vaddva.u32 r12, q0
+; CHECK-NEXT:    mov r0, r12
+; CHECK-NEXT:    bx lr
+entry:
+  %load1 = load <4 x i32>, <4 x i32>* %arg2, align 4
+  %gep = getelementptr inbounds i32, i32* %arg2, i32 1
+  %load2 = load <4 x i32>, <4 x i32>* %gep, align 4
+  %reduce1 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %load1)
+  %reduce2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %load2)
+  %add1 = add i32 %arg1, %reduce1
+  store i32 %add1, i32* %ptr, align 4
+  %add2 = add i32 %add1, %reduce2
+  ret i32 %add2
+}
+
+define arm_aapcs_vfpcc i32 @test5(ptr %ptr, i32 %arg1, ptr %arg2) {
+; CHECK-LABEL: test5:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrw.u32 q0, [r2, #4]
+; CHECK-NEXT:    mov r12, r1
+; CHECK-NEXT:    vaddva.u32 r12, q0
+; CHECK-NEXT:    vldrw.u32 q0, [r2]
+; CHECK-NEXT:    str.w r12, [r0]
+; CHECK-NEXT:    vaddva.u32 r12, q0
+; CHECK-NEXT:    mov r0, r12
+; CHECK-NEXT:    bx lr
+entry:
+  %load1 = load <4 x i32>, <4 x i32>* %arg2, align 4
+  %gep = getelementptr inbounds i32, i32* %arg2, i32 1
+  %load2 = load <4 x i32>, <4 x i32>* %gep, align 4
+  %reduce1 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %load1)
+  %reduce2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %load2)
+  %add1 = add i32 %arg1, %reduce2
+  store i32 %add1, i32* %ptr, align 4
+  %add2 = add i32 %add1, %reduce1
+  ret i32 %add2
+}
+
+declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
