[llvm] 31ec721 - [llvm][CodeGen] DAG Combiner folds for vscale.

Francesco Petrogalli via llvm-commits llvm-commits at lists.llvm.org
Fri Feb 21 10:04:02 PST 2020


Author: Francesco Petrogalli
Date: 2020-02-21T18:03:12Z
New Revision: 31ec721516b5ed36f7dbed180a903e269f29716d

URL: https://github.com/llvm/llvm-project/commit/31ec721516b5ed36f7dbed180a903e269f29716d
DIFF: https://github.com/llvm/llvm-project/commit/31ec721516b5ed36f7dbed180a903e269f29716d.diff

LOG: [llvm][CodeGen] DAG Combiner folds for vscale.

Summary:
This patch simplifies the DAGs generated when using the intrinsic `@llvm.vscale.*` as follows:

* Fold (add (vscale * C0), (vscale * C1)) to (vscale * (C0 + C1)).
* Canonicalize (sub X, (vscale * C)) to (add X, (vscale * -C)).
* Fold (mul (vscale * C0), C1) to (vscale * (C0 * C1)).
* Fold (shl (vscale * C0), C1) to (vscale * (C0 << C1)).

The test `sve-gep.ll` has been updated to reflect the folding introduced by this patch.
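
As a sketch of how these folds compose (the function below is for illustration only and is not part of the committed tests), a chain of `mul`, `shl` and `add` on the result of `@llvm.vscale.i64` should now collapse into a single `ISD::VSCALE` node carrying the combined constant:

  declare i64 @llvm.vscale.i64()

  define i64 @vscale_times_24() {
    %vscale = call i64 @llvm.vscale.i64()
    %a = mul i64 %vscale, 16    ; mul fold: vscale * 16
    %b = shl i64 %vscale, 3     ; shl fold: vscale * (1 << 3) = vscale * 8
    %c = add i64 %a, %b         ; add fold: vscale * (16 + 8) = vscale * 24
    ret i64 %c
  }

On AArch64 with SVE, a vscale multiple that is itself a multiple of 16 maps onto a single RDVL immediate (rdvl #N materializes N * 16 * vscale), which is why the `sve-gep.ll` check lines below change from `rdvl x8, #1` plus a shifted add to `rdvl x8, #4` plus a plain add.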

Reviewers: efriedma, sdesmalen, andwar, rengolin

Reviewed By: sdesmalen

Subscribers: hiraditya, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D74782

Added: 
    llvm/test/CodeGen/AArch64/sve-vscale-combine.ll

Modified: 
    llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
    llvm/test/CodeGen/AArch64/sve-gep.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 4a684efcb395..4f7598c5ccaf 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -2330,6 +2330,13 @@ SDValue DAGCombiner::visitADD(SDNode *N) {
       DAG.haveNoCommonBitsSet(N0, N1))
     return DAG.getNode(ISD::OR, DL, VT, N0, N1);
 
+  // Fold (add (vscale * C0), (vscale * C1)) to (vscale * (C0 + C1)).
+  if (N0.getOpcode() == ISD::VSCALE && N1.getOpcode() == ISD::VSCALE) {
+    APInt C0 = N0->getConstantOperandAPInt(0);
+    APInt C1 = N1->getConstantOperandAPInt(0);
+    return DAG.getVScale(DL, VT, C0 + C1);
+  }
+
   return SDValue();
 }
 
@@ -3253,6 +3260,12 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
     }
   }
 
+  // canonicalize (sub X, (vscale * C)) to (add X,  (vscale * -C))
+  if (N1.getOpcode() == ISD::VSCALE) {
+    APInt IntVal = N1.getConstantOperandAPInt(0);
+    return DAG.getNode(ISD::ADD, DL, VT, N0, DAG.getVScale(DL, VT, -IntVal));
+  }
+
   // Prefer an add for more folding potential and possibly better codegen:
   // sub N0, (lshr N10, width-1) --> add N0, (ashr N10, width-1)
   if (!LegalOperations && N1.getOpcode() == ISD::SRL && N1.hasOneUse()) {
@@ -3588,6 +3601,14 @@ SDValue DAGCombiner::visitMUL(SDNode *N) {
                          DAG.getNode(ISD::MUL, SDLoc(N1), VT,
                                      N0.getOperand(1), N1));
 
+  // Fold (mul (vscale * C0), C1) to (vscale * (C0 * C1)).
+  if (N0.getOpcode() == ISD::VSCALE)
+    if (ConstantSDNode *NC1 = isConstOrConstSplat(N1)) {
+      APInt C0 = N0.getConstantOperandAPInt(0);
+      APInt C1 = NC1->getAPIntValue();
+      return DAG.getVScale(SDLoc(N), VT, C0 * C1);
+    }
+
   // reassociate mul
   if (SDValue RMUL = reassociateOps(ISD::MUL, SDLoc(N), N0, N1, N->getFlags()))
     return RMUL;
@@ -7759,6 +7780,15 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
     if (SDValue NewSHL = visitShiftByConstant(N))
       return NewSHL;
 
+  // Fold (shl (vscale * C0), C1) to (vscale * (C0 << C1)).
+  if (N0.getOpcode() == ISD::VSCALE)
+    if (ConstantSDNode *NC1 = isConstOrConstSplat(N->getOperand(1))) {
+      auto DL = SDLoc(N);
+      APInt C0 = N0.getConstantOperandAPInt(0);
+      APInt C1 = NC1->getAPIntValue();
+      return DAG.getVScale(DL, VT, C0 << C1);
+    }
+
   return SDValue();
 }
 

diff --git a/llvm/test/CodeGen/AArch64/sve-gep.ll b/llvm/test/CodeGen/AArch64/sve-gep.ll
index a798913bfde4..1b558a833f3b 100644
--- a/llvm/test/CodeGen/AArch64/sve-gep.ll
+++ b/llvm/test/CodeGen/AArch64/sve-gep.ll
@@ -4,8 +4,8 @@
 define <vscale x 2 x i64>* @scalar_of_scalable_1(<vscale x 2 x i64>* %base) {
 ; CHECK-LABEL: scalar_of_scalable_1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdvl x8, #1
-; CHECK-NEXT:    add x0, x0, x8, lsl #2
+; CHECK-NEXT:    rdvl x8, #4
+; CHECK-NEXT:    add x0, x0, x8
 ; CHECK-NEXT:    ret
   %d = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %base, i64 4
   ret <vscale x 2 x i64>* %d

diff --git a/llvm/test/CodeGen/AArch64/sve-vscale-combine.ll b/llvm/test/CodeGen/AArch64/sve-vscale-combine.ll
new file mode 100644
index 000000000000..7ef9259bf7a5
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-vscale-combine.ll
@@ -0,0 +1,97 @@
+; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve --asm-verbose=false < %s |FileCheck %s
+
+declare i32 @llvm.vscale.i32()
+declare i64 @llvm.vscale.i64()
+
+; Fold (add (vscale * C0), (vscale * C1)) to (vscale * (C0 + C1)).
+define i64 @combine_add_vscale_i64() nounwind {
+; CHECK-LABEL: combine_add_vscale_i64:
+; CHECK-NOT:   add
+; CHECK-NEXT:  cntd  x0
+; CHECK-NEXT:  ret
+ %vscale = call i64 @llvm.vscale.i64()
+ %add = add i64 %vscale, %vscale
+ ret i64 %add
+}
+
+define i32 @combine_add_vscale_i32() nounwind {
+; CHECK-LABEL: combine_add_vscale_i32:
+; CHECK-NOT:   add
+; CHECK-NEXT:  cntd  x0
+; CHECK-NEXT:  ret
+ %vscale = call i32 @llvm.vscale.i32()
+ %add = add i32 %vscale, %vscale
+ ret i32 %add
+}
+
+; Fold (mul (vscale * C0), C1) to (vscale * (C0 * C1)).
+; In this test, C0 = 1, C1 = 32.
+define i64 @combine_mul_vscale_i64() nounwind {
+; CHECK-LABEL: combine_mul_vscale_i64:
+; CHECK-NOT:   mul
+; CHECK-NEXT:  rdvl  x0, #2
+; CHECK-NEXT:  ret
+ %vscale = call i64 @llvm.vscale.i64()
+ %mul = mul i64 %vscale, 32
+ ret i64 %mul
+}
+
+define i32 @combine_mul_vscale_i32() nounwind {
+; CHECK-LABEL: combine_mul_vscale_i32:
+; CHECK-NOT:   mul
+; CHECK-NEXT:  rdvl  x0, #3
+; CHECK-NEXT:  ret
+ %vscale = call i32 @llvm.vscale.i32()
+ %mul = mul i32 %vscale, 48
+ ret i32 %mul
+}
+
+; Canonicalize (sub X, (vscale * C)) to (add X,  (vscale * -C))
+define i64 @combine_sub_vscale_i64(i64 %in) nounwind {
+; CHECK-LABEL: combine_sub_vscale_i64:
+; CHECK-NOT:   sub
+; CHECK-NEXT:  rdvl  x8, #-1
+; CHECK-NEXT:  asr   x8, x8, #4
+; CHECK-NEXT:  add   x0, x0, x8
+; CHECK-NEXT:  ret
+ %vscale = call i64 @llvm.vscale.i64()
+ %sub = sub i64 %in,  %vscale
+ ret i64 %sub
+}
+
+define i32 @combine_sub_vscale_i32(i32 %in) nounwind {
+; CHECK-LABEL: combine_sub_vscale_i32:
+; CHECK-NOT:   sub
+; CHECK-NEXT:  rdvl  x8, #-1
+; CHECK-NEXT:  asr   x8, x8, #4
+; CHECK-NEXT:  add   w0, w0, w8
+; CHECK-NEXT:  ret
+ %vscale = call i32 @llvm.vscale.i32()
+ %sub = sub i32 %in, %vscale
+ ret i32 %sub
+}
+
+; Fold (shl (vscale * C0), C1) to (vscale * (C0 << C1)).
+; C0 = 1 , C1 = 4
+; At IR level,  %shl = 2^4 * VSCALE.
+; At Assembly level, the output of RDVL is also 2^4 * VSCALE.
+; Hence, the immediate for RDVL is #1.
+define i64 @combine_shl_vscale_i64() nounwind {
+; CHECK-LABEL: combine_shl_vscale_i64:
+; CHECK-NOT:   shl
+; CHECK-NEXT:  rdvl  x0, #1
+; CHECK-NEXT:  ret
+ %vscale = call i64 @llvm.vscale.i64()
+ %shl = shl i64 %vscale, 4
+ ret i64 %shl
+}
+
+define i32 @combine_shl_vscale_i32() nounwind {
+; CHECK-LABEL: combine_shl_vscale_i32:
+; CHECK-NOT:   shl
+; CHECK-NEXT:  rdvl  x0, #1
+; CHECK-NEXT:  ret
+ %vscale = call i32 @llvm.vscale.i32()
+ %shl = shl i32 %vscale, 4
+ ret i32 %shl
+}