[llvm] 1b736bd - [RISCV] Enable CGP to sink splat operands of Add/Sub/Mul/Shl/LShr/AShr

Craig Topper via llvm-commits <llvm-commits at lists.llvm.org>
Fri Sep 10 09:13:48 PDT 2021


Author: Craig Topper
Date: 2021-09-10T09:04:01-07:00
New Revision: 1b736bda3bb5acd2874e862d9fd0cec335dd899b

URL: https://github.com/llvm/llvm-project/commit/1b736bda3bb5acd2874e862d9fd0cec335dd899b
DIFF: https://github.com/llvm/llvm-project/commit/1b736bda3bb5acd2874e862d9fd0cec335dd899b.diff

LOG: [RISCV] Enable CGP to sink splat operands of Add/Sub/Mul/Shl/LShr/AShr

LICM may have pulled a splat out of a loop, but with .vx instructions we
can fold the scalar operand into the vector operation instead.

This patch enables CGP to reverse the LICM transform and move the
splat back into the loop.

I've started with the basic integer operations (add, sub, mul) and the
shifts; this can be extended to more operations in future patches.
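
As an illustration, here is a reduced sketch of the IR shape this patch
targets (hypothetical names, not taken verbatim from the test file): the
splat sits in the preheader, and CGP can now sink it next to its use in
vector.body so that isel picks vmul.vx instead of vmv.v.x + vmul.vv.

  define void @splat_sink_sketch(i32* nocapture %a, i32 signext %x) {
  entry:
    ; Splat of %x hoisted out of the loop (e.g. by LICM/the vectorizer).
    %splatinsert = insertelement <4 x i32> undef, i32 %x, i32 0
    %splat = shufflevector <4 x i32> %splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
    br label %vector.body

  vector.body:
    %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
    %ptr = getelementptr inbounds i32, i32* %a, i64 %index
    %vptr = bitcast i32* %ptr to <4 x i32>*
    %wide.load = load <4 x i32>, <4 x i32>* %vptr, align 4
    ; CGP sinks %splat (both the insertelement and the shufflevector)
    ; into this block, right before the mul.
    %mul = mul <4 x i32> %wide.load, %splat
    store <4 x i32> %mul, <4 x i32>* %vptr, align 4
    %index.next = add nuw i64 %index, 4
    %done = icmp eq i64 %index.next, 1024
    br i1 %done, label %exit, label %vector.body

  exit:
    ret void
  }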

Reviewed By: frasercrmck

Differential Revision: https://reviews.llvm.org/D109394

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVISelLowering.h
    llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 9929edf2fee69..e9ac4bbfd270f 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -28,8 +28,9 @@
 #include "llvm/CodeGen/ValueTypes.h"
 #include "llvm/IR/DiagnosticInfo.h"
 #include "llvm/IR/DiagnosticPrinter.h"
-#include "llvm/IR/IntrinsicsRISCV.h"
 #include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/IntrinsicsRISCV.h"
+#include "llvm/IR/PatternMatch.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/KnownBits.h"
@@ -1030,6 +1031,59 @@ bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
   return Subtarget.hasStdExtZbb();
 }
 
+/// Check if sinking \p I's operands to I's basic block is profitable, because
+/// the operands can be folded into a target instruction, e.g.
+/// splats of scalars can fold into vector instructions.
+bool RISCVTargetLowering::shouldSinkOperands(
+    Instruction *I, SmallVectorImpl<Use *> &Ops) const {
+  using namespace llvm::PatternMatch;
+
+  if (!I->getType()->isVectorTy() || !Subtarget.hasStdExtV())
+    return false;
+
+  auto IsSinker = [&](Instruction *I, int Operand) {
+    switch (I->getOpcode()) {
+    case Instruction::Add:
+    case Instruction::Sub:
+    case Instruction::Mul:
+      return true;
+    case Instruction::Shl:
+    case Instruction::LShr:
+    case Instruction::AShr:
+      return Operand == 1;
+    default:
+      return false;
+    }
+  };
+
+  for (auto OpIdx : enumerate(I->operands())) {
+    if (!IsSinker(I, OpIdx.index()))
+      continue;
+
+    Instruction *Op = dyn_cast<Instruction>(OpIdx.value().get());
+    // Make sure we are not already sinking this operand
+    if (!Op || any_of(Ops, [&](Use *U) { return U->get() == Op; }))
+      continue;
+
+    // We are looking for a splat that can be sunk.
+    if (!match(Op, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
+                             m_Undef(), m_ZeroMask())))
+      continue;
+
+    // All uses of the shuffle should be sunk to avoid duplicating it across gpr
+    // and vector registers
+    for (Use &U : Op->uses()) {
+      Instruction *Insn = cast<Instruction>(U.getUser());
+      if (!IsSinker(Insn, U.getOperandNo()))
+        return false;
+    }
+
+    Ops.push_back(&Op->getOperandUse(0));
+    Ops.push_back(&OpIdx.value());
+  }
+  return true;
+}
+
 bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                        bool ForCodeSize) const {
   if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())

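For reference, the m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
m_Undef(), m_ZeroMask()) pattern above matches the canonical IR splat idiom.
A minimal sketch (value names are hypothetical):

  ; insertelement of the scalar into lane 0, then a zero-mask shuffle
  ; broadcasting lane 0 to every element.
  %ins   = insertelement <4 x i32> undef, i32 %x, i32 0
  %splat = shufflevector <4 x i32> %ins, <4 x i32> undef, <4 x i32> zeroinitializer

Both uses are pushed into Ops (the insertelement's use by the shuffle, and
the shuffle's use by I), so the whole idiom is sunk as a unit.
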
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index 5c0b0c360f3ff..45173fc39d281 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -314,6 +314,8 @@ class RISCVTargetLowering : public TargetLowering {
   bool isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const override;
   bool isCheapToSpeculateCttz() const override;
   bool isCheapToSpeculateCtlz() const override;
+  bool shouldSinkOperands(Instruction *I,
+                          SmallVectorImpl<Use *> &Ops) const override;
   bool isFPImmLegal(const APFloat &Imm, EVT VT,
                     bool ForCodeSize) const override;
 

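(For context: shouldSinkOperands is an existing TargetLowering hook, already
implemented by AArch64 and ARM; CodeGenPrepare queries it when deciding
whether to sink cheap operand instructions into the block of their user.)
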
diff --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
index 12e122afb87a5..5c32a011e0d04 100644
--- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
@@ -5,17 +5,16 @@
 define void @sink_splat_mul(i32* nocapture %a, i32 signext %x) {
 ; CHECK-LABEL: sink_splat_mul:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.v.x v25, a1
-; CHECK-NEXT:    addi a1, zero, 1024
+; CHECK-NEXT:    addi a2, zero, 1024
 ; CHECK-NEXT:  .LBB0_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vle32.v v26, (a0)
-; CHECK-NEXT:    vmul.vv v26, v26, v25
-; CHECK-NEXT:    vse32.v v26, (a0)
-; CHECK-NEXT:    addi a1, a1, -4
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vle32.v v25, (a0)
+; CHECK-NEXT:    vmul.vx v25, v25, a1
+; CHECK-NEXT:    vse32.v v25, (a0)
+; CHECK-NEXT:    addi a2, a2, -4
 ; CHECK-NEXT:    addi a0, a0, 16
-; CHECK-NEXT:    bnez a1, .LBB0_1
+; CHECK-NEXT:    bnez a2, .LBB0_1
 ; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
 ; CHECK-NEXT:    ret
 entry:
@@ -42,17 +41,16 @@ for.cond.cleanup:                                 ; preds = %vector.body
 define void @sink_splat_add(i32* nocapture %a, i32 signext %x) {
 ; CHECK-LABEL: sink_splat_add:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.v.x v25, a1
-; CHECK-NEXT:    addi a1, zero, 1024
+; CHECK-NEXT:    addi a2, zero, 1024
 ; CHECK-NEXT:  .LBB1_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vle32.v v26, (a0)
-; CHECK-NEXT:    vadd.vv v26, v26, v25
-; CHECK-NEXT:    vse32.v v26, (a0)
-; CHECK-NEXT:    addi a1, a1, -4
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vle32.v v25, (a0)
+; CHECK-NEXT:    vadd.vx v25, v25, a1
+; CHECK-NEXT:    vse32.v v25, (a0)
+; CHECK-NEXT:    addi a2, a2, -4
 ; CHECK-NEXT:    addi a0, a0, 16
-; CHECK-NEXT:    bnez a1, .LBB1_1
+; CHECK-NEXT:    bnez a2, .LBB1_1
 ; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
 ; CHECK-NEXT:    ret
 entry:
@@ -79,17 +77,16 @@ for.cond.cleanup:                                 ; preds = %vector.body
 define void @sink_splat_sub(i32* nocapture %a, i32 signext %x) {
 ; CHECK-LABEL: sink_splat_sub:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.v.x v25, a1
-; CHECK-NEXT:    addi a1, zero, 1024
+; CHECK-NEXT:    addi a2, zero, 1024
 ; CHECK-NEXT:  .LBB2_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vle32.v v26, (a0)
-; CHECK-NEXT:    vadd.vv v26, v26, v25
-; CHECK-NEXT:    vse32.v v26, (a0)
-; CHECK-NEXT:    addi a1, a1, -4
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vle32.v v25, (a0)
+; CHECK-NEXT:    vadd.vx v25, v25, a1
+; CHECK-NEXT:    vse32.v v25, (a0)
+; CHECK-NEXT:    addi a2, a2, -4
 ; CHECK-NEXT:    addi a0, a0, 16
-; CHECK-NEXT:    bnez a1, .LBB2_1
+; CHECK-NEXT:    bnez a2, .LBB2_1
 ; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
 ; CHECK-NEXT:    ret
 entry:
@@ -116,17 +113,16 @@ for.cond.cleanup:                                 ; preds = %vector.body
 define void @sink_splat_rsub(i32* nocapture %a, i32 signext %x) {
 ; CHECK-LABEL: sink_splat_rsub:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.v.x v25, a1
-; CHECK-NEXT:    addi a1, zero, 1024
+; CHECK-NEXT:    addi a2, zero, 1024
 ; CHECK-NEXT:  .LBB3_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vle32.v v26, (a0)
-; CHECK-NEXT:    vsub.vv v26, v25, v26
-; CHECK-NEXT:    vse32.v v26, (a0)
-; CHECK-NEXT:    addi a1, a1, -4
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vle32.v v25, (a0)
+; CHECK-NEXT:    vrsub.vx v25, v25, a1
+; CHECK-NEXT:    vse32.v v25, (a0)
+; CHECK-NEXT:    addi a2, a2, -4
 ; CHECK-NEXT:    addi a0, a0, 16
-; CHECK-NEXT:    bnez a1, .LBB3_1
+; CHECK-NEXT:    bnez a2, .LBB3_1
 ; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
 ; CHECK-NEXT:    ret
 entry:
@@ -153,34 +149,33 @@ for.cond.cleanup:                                 ; preds = %vector.body
 define void @sink_splat_mul_scalable(i32* nocapture %a, i32 signext %x) {
 ; CHECK-LABEL: sink_splat_mul_scalable:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    csrr a5, vlenb
-; CHECK-NEXT:    srli a7, a5, 1
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    srli a7, a2, 1
 ; CHECK-NEXT:    addi a3, zero, 1024
 ; CHECK-NEXT:    bgeu a3, a7, .LBB4_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv a3, zero
+; CHECK-NEXT:    mv t0, zero
 ; CHECK-NEXT:    j .LBB4_5
 ; CHECK-NEXT:  .LBB4_2: # %vector.ph
-; CHECK-NEXT:    mv a4, zero
+; CHECK-NEXT:    mv a5, zero
 ; CHECK-NEXT:    remu a6, a3, a7
-; CHECK-NEXT:    sub a3, a3, a6
-; CHECK-NEXT:    vsetvli a2, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmv.v.x v26, a1
-; CHECK-NEXT:    slli a5, a5, 1
+; CHECK-NEXT:    sub t0, a3, a6
+; CHECK-NEXT:    slli a4, a2, 1
 ; CHECK-NEXT:    mv a2, a0
 ; CHECK-NEXT:  .LBB4_3: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vl2re32.v v28, (a2)
-; CHECK-NEXT:    vmul.vv v28, v28, v26
-; CHECK-NEXT:    vs2r.v v28, (a2)
-; CHECK-NEXT:    add a4, a4, a7
-; CHECK-NEXT:    add a2, a2, a5
-; CHECK-NEXT:    bne a4, a3, .LBB4_3
+; CHECK-NEXT:    vl2re32.v v26, (a2)
+; CHECK-NEXT:    vsetvli a3, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vmul.vx v26, v26, a1
+; CHECK-NEXT:    vs2r.v v26, (a2)
+; CHECK-NEXT:    add a5, a5, a7
+; CHECK-NEXT:    add a2, a2, a4
+; CHECK-NEXT:    bne a5, t0, .LBB4_3
 ; CHECK-NEXT:  # %bb.4: # %middle.block
 ; CHECK-NEXT:    beqz a6, .LBB4_7
 ; CHECK-NEXT:  .LBB4_5: # %for.body.preheader
-; CHECK-NEXT:    addi a2, a3, -1024
-; CHECK-NEXT:    slli a3, a3, 2
+; CHECK-NEXT:    addi a2, t0, -1024
+; CHECK-NEXT:    slli a3, t0, 2
 ; CHECK-NEXT:    add a0, a0, a3
 ; CHECK-NEXT:  .LBB4_6: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
@@ -247,34 +242,33 @@ for.body:                                         ; preds = %for.body.preheader,
 define void @sink_splat_add_scalable(i32* nocapture %a, i32 signext %x) {
 ; CHECK-LABEL: sink_splat_add_scalable:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    csrr a5, vlenb
-; CHECK-NEXT:    srli a7, a5, 1
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    srli a7, a2, 1
 ; CHECK-NEXT:    addi a3, zero, 1024
 ; CHECK-NEXT:    bgeu a3, a7, .LBB5_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv a3, zero
+; CHECK-NEXT:    mv t0, zero
 ; CHECK-NEXT:    j .LBB5_5
 ; CHECK-NEXT:  .LBB5_2: # %vector.ph
-; CHECK-NEXT:    mv a4, zero
+; CHECK-NEXT:    mv a5, zero
 ; CHECK-NEXT:    remu a6, a3, a7
-; CHECK-NEXT:    sub a3, a3, a6
-; CHECK-NEXT:    vsetvli a2, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmv.v.x v26, a1
-; CHECK-NEXT:    slli a5, a5, 1
+; CHECK-NEXT:    sub t0, a3, a6
+; CHECK-NEXT:    slli a4, a2, 1
 ; CHECK-NEXT:    mv a2, a0
 ; CHECK-NEXT:  .LBB5_3: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vl2re32.v v28, (a2)
-; CHECK-NEXT:    vadd.vv v28, v28, v26
-; CHECK-NEXT:    vs2r.v v28, (a2)
-; CHECK-NEXT:    add a4, a4, a7
-; CHECK-NEXT:    add a2, a2, a5
-; CHECK-NEXT:    bne a4, a3, .LBB5_3
+; CHECK-NEXT:    vl2re32.v v26, (a2)
+; CHECK-NEXT:    vsetvli a3, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vadd.vx v26, v26, a1
+; CHECK-NEXT:    vs2r.v v26, (a2)
+; CHECK-NEXT:    add a5, a5, a7
+; CHECK-NEXT:    add a2, a2, a4
+; CHECK-NEXT:    bne a5, t0, .LBB5_3
 ; CHECK-NEXT:  # %bb.4: # %middle.block
 ; CHECK-NEXT:    beqz a6, .LBB5_7
 ; CHECK-NEXT:  .LBB5_5: # %for.body.preheader
-; CHECK-NEXT:    addi a2, a3, -1024
-; CHECK-NEXT:    slli a3, a3, 2
+; CHECK-NEXT:    addi a2, t0, -1024
+; CHECK-NEXT:    slli a3, t0, 2
 ; CHECK-NEXT:    add a0, a0, a3
 ; CHECK-NEXT:  .LBB5_6: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
@@ -341,34 +335,33 @@ for.body:                                         ; preds = %for.body.preheader,
 define void @sink_splat_sub_scalable(i32* nocapture %a, i32 signext %x) {
 ; CHECK-LABEL: sink_splat_sub_scalable:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    csrr a5, vlenb
-; CHECK-NEXT:    srli a7, a5, 1
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    srli a7, a2, 1
 ; CHECK-NEXT:    addi a3, zero, 1024
 ; CHECK-NEXT:    bgeu a3, a7, .LBB6_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv a3, zero
+; CHECK-NEXT:    mv t0, zero
 ; CHECK-NEXT:    j .LBB6_5
 ; CHECK-NEXT:  .LBB6_2: # %vector.ph
-; CHECK-NEXT:    mv a4, zero
+; CHECK-NEXT:    mv a5, zero
 ; CHECK-NEXT:    remu a6, a3, a7
-; CHECK-NEXT:    sub a3, a3, a6
-; CHECK-NEXT:    vsetvli a2, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmv.v.x v26, a1
-; CHECK-NEXT:    slli a5, a5, 1
+; CHECK-NEXT:    sub t0, a3, a6
+; CHECK-NEXT:    slli a4, a2, 1
 ; CHECK-NEXT:    mv a2, a0
 ; CHECK-NEXT:  .LBB6_3: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vl2re32.v v28, (a2)
-; CHECK-NEXT:    vsub.vv v28, v28, v26
-; CHECK-NEXT:    vs2r.v v28, (a2)
-; CHECK-NEXT:    add a4, a4, a7
-; CHECK-NEXT:    add a2, a2, a5
-; CHECK-NEXT:    bne a4, a3, .LBB6_3
+; CHECK-NEXT:    vl2re32.v v26, (a2)
+; CHECK-NEXT:    vsetvli a3, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vsub.vx v26, v26, a1
+; CHECK-NEXT:    vs2r.v v26, (a2)
+; CHECK-NEXT:    add a5, a5, a7
+; CHECK-NEXT:    add a2, a2, a4
+; CHECK-NEXT:    bne a5, t0, .LBB6_3
 ; CHECK-NEXT:  # %bb.4: # %middle.block
 ; CHECK-NEXT:    beqz a6, .LBB6_7
 ; CHECK-NEXT:  .LBB6_5: # %for.body.preheader
-; CHECK-NEXT:    addi a2, a3, -1024
-; CHECK-NEXT:    slli a3, a3, 2
+; CHECK-NEXT:    addi a2, t0, -1024
+; CHECK-NEXT:    slli a3, t0, 2
 ; CHECK-NEXT:    add a0, a0, a3
 ; CHECK-NEXT:  .LBB6_6: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
@@ -435,34 +428,33 @@ for.body:                                         ; preds = %for.body.preheader,
 define void @sink_splat_rsub_scalable(i32* nocapture %a, i32 signext %x) {
 ; CHECK-LABEL: sink_splat_rsub_scalable:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    csrr a5, vlenb
-; CHECK-NEXT:    srli a7, a5, 1
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    srli a7, a2, 1
 ; CHECK-NEXT:    addi a3, zero, 1024
 ; CHECK-NEXT:    bgeu a3, a7, .LBB7_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv a3, zero
+; CHECK-NEXT:    mv t0, zero
 ; CHECK-NEXT:    j .LBB7_5
 ; CHECK-NEXT:  .LBB7_2: # %vector.ph
-; CHECK-NEXT:    mv a4, zero
+; CHECK-NEXT:    mv a5, zero
 ; CHECK-NEXT:    remu a6, a3, a7
-; CHECK-NEXT:    sub a3, a3, a6
-; CHECK-NEXT:    vsetvli a2, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmv.v.x v26, a1
-; CHECK-NEXT:    slli a5, a5, 1
+; CHECK-NEXT:    sub t0, a3, a6
+; CHECK-NEXT:    slli a4, a2, 1
 ; CHECK-NEXT:    mv a2, a0
 ; CHECK-NEXT:  .LBB7_3: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vl2re32.v v28, (a2)
-; CHECK-NEXT:    vsub.vv v28, v26, v28
-; CHECK-NEXT:    vs2r.v v28, (a2)
-; CHECK-NEXT:    add a4, a4, a7
-; CHECK-NEXT:    add a2, a2, a5
-; CHECK-NEXT:    bne a4, a3, .LBB7_3
+; CHECK-NEXT:    vl2re32.v v26, (a2)
+; CHECK-NEXT:    vsetvli a3, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vrsub.vx v26, v26, a1
+; CHECK-NEXT:    vs2r.v v26, (a2)
+; CHECK-NEXT:    add a5, a5, a7
+; CHECK-NEXT:    add a2, a2, a4
+; CHECK-NEXT:    bne a5, t0, .LBB7_3
 ; CHECK-NEXT:  # %bb.4: # %middle.block
 ; CHECK-NEXT:    beqz a6, .LBB7_7
 ; CHECK-NEXT:  .LBB7_5: # %for.body.preheader
-; CHECK-NEXT:    addi a2, a3, -1024
-; CHECK-NEXT:    slli a3, a3, 2
+; CHECK-NEXT:    addi a2, t0, -1024
+; CHECK-NEXT:    slli a3, t0, 2
 ; CHECK-NEXT:    add a0, a0, a3
 ; CHECK-NEXT:  .LBB7_6: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
@@ -529,17 +521,16 @@ for.body:                                         ; preds = %for.body.preheader,
 define void @sink_splat_shl(i32* nocapture %a, i32 signext %x) {
 ; CHECK-LABEL: sink_splat_shl:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.v.x v25, a1
-; CHECK-NEXT:    addi a1, zero, 1024
+; CHECK-NEXT:    addi a2, zero, 1024
 ; CHECK-NEXT:  .LBB8_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vle32.v v26, (a0)
-; CHECK-NEXT:    vsll.vv v26, v26, v25
-; CHECK-NEXT:    vse32.v v26, (a0)
-; CHECK-NEXT:    addi a1, a1, -4
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vle32.v v25, (a0)
+; CHECK-NEXT:    vsll.vx v25, v25, a1
+; CHECK-NEXT:    vse32.v v25, (a0)
+; CHECK-NEXT:    addi a2, a2, -4
 ; CHECK-NEXT:    addi a0, a0, 16
-; CHECK-NEXT:    bnez a1, .LBB8_1
+; CHECK-NEXT:    bnez a2, .LBB8_1
 ; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
 ; CHECK-NEXT:    ret
 entry:
@@ -566,17 +557,16 @@ for.cond.cleanup:                                 ; preds = %vector.body
 define void @sink_splat_lshr(i32* nocapture %a, i32 signext %x) {
 ; CHECK-LABEL: sink_splat_lshr:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.v.x v25, a1
-; CHECK-NEXT:    addi a1, zero, 1024
+; CHECK-NEXT:    addi a2, zero, 1024
 ; CHECK-NEXT:  .LBB9_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vle32.v v26, (a0)
-; CHECK-NEXT:    vsrl.vv v26, v26, v25
-; CHECK-NEXT:    vse32.v v26, (a0)
-; CHECK-NEXT:    addi a1, a1, -4
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vle32.v v25, (a0)
+; CHECK-NEXT:    vsrl.vx v25, v25, a1
+; CHECK-NEXT:    vse32.v v25, (a0)
+; CHECK-NEXT:    addi a2, a2, -4
 ; CHECK-NEXT:    addi a0, a0, 16
-; CHECK-NEXT:    bnez a1, .LBB9_1
+; CHECK-NEXT:    bnez a2, .LBB9_1
 ; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
 ; CHECK-NEXT:    ret
 entry:
@@ -603,17 +593,16 @@ for.cond.cleanup:                                 ; preds = %vector.body
 define void @sink_splat_ashr(i32* nocapture %a, i32 signext %x) {
 ; CHECK-LABEL: sink_splat_ashr:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.v.x v25, a1
-; CHECK-NEXT:    addi a1, zero, 1024
+; CHECK-NEXT:    addi a2, zero, 1024
 ; CHECK-NEXT:  .LBB10_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vle32.v v26, (a0)
-; CHECK-NEXT:    vsra.vv v26, v26, v25
-; CHECK-NEXT:    vse32.v v26, (a0)
-; CHECK-NEXT:    addi a1, a1, -4
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vle32.v v25, (a0)
+; CHECK-NEXT:    vsra.vx v25, v25, a1
+; CHECK-NEXT:    vse32.v v25, (a0)
+; CHECK-NEXT:    addi a2, a2, -4
 ; CHECK-NEXT:    addi a0, a0, 16
-; CHECK-NEXT:    bnez a1, .LBB10_1
+; CHECK-NEXT:    bnez a2, .LBB10_1
 ; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
 ; CHECK-NEXT:    ret
 entry:
@@ -640,34 +629,33 @@ for.cond.cleanup:                                 ; preds = %vector.body
 define void @sink_splat_shl_scalable(i32* nocapture %a, i32 signext %x) {
 ; CHECK-LABEL: sink_splat_shl_scalable:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    csrr a5, vlenb
-; CHECK-NEXT:    srli a7, a5, 1
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    srli a7, a2, 1
 ; CHECK-NEXT:    addi a3, zero, 1024
 ; CHECK-NEXT:    bgeu a3, a7, .LBB11_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv a3, zero
+; CHECK-NEXT:    mv t0, zero
 ; CHECK-NEXT:    j .LBB11_5
 ; CHECK-NEXT:  .LBB11_2: # %vector.ph
-; CHECK-NEXT:    mv a4, zero
+; CHECK-NEXT:    mv a5, zero
 ; CHECK-NEXT:    remu a6, a3, a7
-; CHECK-NEXT:    sub a3, a3, a6
-; CHECK-NEXT:    vsetvli a2, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmv.v.x v26, a1
-; CHECK-NEXT:    slli a5, a5, 1
+; CHECK-NEXT:    sub t0, a3, a6
+; CHECK-NEXT:    slli a4, a2, 1
 ; CHECK-NEXT:    mv a2, a0
 ; CHECK-NEXT:  .LBB11_3: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vl2re32.v v28, (a2)
-; CHECK-NEXT:    vsll.vv v28, v28, v26
-; CHECK-NEXT:    vs2r.v v28, (a2)
-; CHECK-NEXT:    add a4, a4, a7
-; CHECK-NEXT:    add a2, a2, a5
-; CHECK-NEXT:    bne a4, a3, .LBB11_3
+; CHECK-NEXT:    vl2re32.v v26, (a2)
+; CHECK-NEXT:    vsetvli a3, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vsll.vx v26, v26, a1
+; CHECK-NEXT:    vs2r.v v26, (a2)
+; CHECK-NEXT:    add a5, a5, a7
+; CHECK-NEXT:    add a2, a2, a4
+; CHECK-NEXT:    bne a5, t0, .LBB11_3
 ; CHECK-NEXT:  # %bb.4: # %middle.block
 ; CHECK-NEXT:    beqz a6, .LBB11_7
 ; CHECK-NEXT:  .LBB11_5: # %for.body.preheader
-; CHECK-NEXT:    addi a2, a3, -1024
-; CHECK-NEXT:    slli a3, a3, 2
+; CHECK-NEXT:    addi a2, t0, -1024
+; CHECK-NEXT:    slli a3, t0, 2
 ; CHECK-NEXT:    add a0, a0, a3
 ; CHECK-NEXT:  .LBB11_6: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
@@ -734,34 +722,33 @@ for.body:                                         ; preds = %for.body.preheader,
 define void @sink_splat_lshr_scalable(i32* nocapture %a, i32 signext %x) {
 ; CHECK-LABEL: sink_splat_lshr_scalable:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    csrr a5, vlenb
-; CHECK-NEXT:    srli a7, a5, 1
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    srli a7, a2, 1
 ; CHECK-NEXT:    addi a3, zero, 1024
 ; CHECK-NEXT:    bgeu a3, a7, .LBB12_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv a3, zero
+; CHECK-NEXT:    mv t0, zero
 ; CHECK-NEXT:    j .LBB12_5
 ; CHECK-NEXT:  .LBB12_2: # %vector.ph
-; CHECK-NEXT:    mv a4, zero
+; CHECK-NEXT:    mv a5, zero
 ; CHECK-NEXT:    remu a6, a3, a7
-; CHECK-NEXT:    sub a3, a3, a6
-; CHECK-NEXT:    vsetvli a2, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmv.v.x v26, a1
-; CHECK-NEXT:    slli a5, a5, 1
+; CHECK-NEXT:    sub t0, a3, a6
+; CHECK-NEXT:    slli a4, a2, 1
 ; CHECK-NEXT:    mv a2, a0
 ; CHECK-NEXT:  .LBB12_3: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vl2re32.v v28, (a2)
-; CHECK-NEXT:    vsrl.vv v28, v28, v26
-; CHECK-NEXT:    vs2r.v v28, (a2)
-; CHECK-NEXT:    add a4, a4, a7
-; CHECK-NEXT:    add a2, a2, a5
-; CHECK-NEXT:    bne a4, a3, .LBB12_3
+; CHECK-NEXT:    vl2re32.v v26, (a2)
+; CHECK-NEXT:    vsetvli a3, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vsrl.vx v26, v26, a1
+; CHECK-NEXT:    vs2r.v v26, (a2)
+; CHECK-NEXT:    add a5, a5, a7
+; CHECK-NEXT:    add a2, a2, a4
+; CHECK-NEXT:    bne a5, t0, .LBB12_3
 ; CHECK-NEXT:  # %bb.4: # %middle.block
 ; CHECK-NEXT:    beqz a6, .LBB12_7
 ; CHECK-NEXT:  .LBB12_5: # %for.body.preheader
-; CHECK-NEXT:    addi a2, a3, -1024
-; CHECK-NEXT:    slli a3, a3, 2
+; CHECK-NEXT:    addi a2, t0, -1024
+; CHECK-NEXT:    slli a3, t0, 2
 ; CHECK-NEXT:    add a0, a0, a3
 ; CHECK-NEXT:  .LBB12_6: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
@@ -828,29 +815,28 @@ for.body:                                         ; preds = %for.body.preheader,
 define void @sink_splat_ashr_scalable(i32* nocapture %a) {
 ; CHECK-LABEL: sink_splat_ashr_scalable:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    csrr a4, vlenb
-; CHECK-NEXT:    srli a1, a4, 1
+; CHECK-NEXT:    csrr a3, vlenb
+; CHECK-NEXT:    srli a7, a3, 1
 ; CHECK-NEXT:    addi a2, zero, 1024
-; CHECK-NEXT:    bgeu a2, a1, .LBB13_2
+; CHECK-NEXT:    bgeu a2, a7, .LBB13_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a2, zero
 ; CHECK-NEXT:    j .LBB13_5
 ; CHECK-NEXT:  .LBB13_2: # %vector.ph
-; CHECK-NEXT:    mv a5, zero
-; CHECK-NEXT:    remu a6, a2, a1
+; CHECK-NEXT:    mv a4, zero
+; CHECK-NEXT:    remu a6, a2, a7
 ; CHECK-NEXT:    sub a2, a2, a6
-; CHECK-NEXT:    vsetvli a3, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmv.v.i v26, 2
-; CHECK-NEXT:    slli a4, a4, 1
+; CHECK-NEXT:    slli a5, a3, 1
 ; CHECK-NEXT:    mv a3, a0
 ; CHECK-NEXT:  .LBB13_3: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vl2re32.v v28, (a3)
-; CHECK-NEXT:    vsra.vv v28, v28, v26
-; CHECK-NEXT:    vs2r.v v28, (a3)
-; CHECK-NEXT:    add a5, a5, a1
-; CHECK-NEXT:    add a3, a3, a4
-; CHECK-NEXT:    bne a5, a2, .LBB13_3
+; CHECK-NEXT:    vl2re32.v v26, (a3)
+; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vsra.vi v26, v26, 2
+; CHECK-NEXT:    vs2r.v v26, (a3)
+; CHECK-NEXT:    add a4, a4, a7
+; CHECK-NEXT:    add a3, a3, a5
+; CHECK-NEXT:    bne a4, a2, .LBB13_3
 ; CHECK-NEXT:  # %bb.4: # %middle.block
 ; CHECK-NEXT:    beqz a6, .LBB13_7
 ; CHECK-NEXT:  .LBB13_5: # %for.body.preheader
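
(One visible side effect in the updated checks: with the splat no longer
materialized in the preheader, the vsetvli/vsetivli that previously
configured it also moves into the loop body next to the new .vx/.vi
instruction.)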