[llvm] cba6aab - [RISCV] Support simple fractional steps in matching VID sequences

Fraser Cormack via llvm-commits llvm-commits at lists.llvm.org
Tue Aug 3 02:47:45 PDT 2021


Author: Fraser Cormack
Date: 2021-08-03T10:38:24+01:00
New Revision: cba6aab9715988b522c21b0e04a7d9b888a81394

URL: https://github.com/llvm/llvm-project/commit/cba6aab9715988b522c21b0e04a7d9b888a81394
DIFF: https://github.com/llvm/llvm-project/commit/cba6aab9715988b522c21b0e04a7d9b888a81394.diff

LOG: [RISCV] Support simple fractional steps in matching VID sequences

This patch extends the optimization of VID-sequence BUILD_VECTORs
introduced in D104921 to include simple fractional steps composed of a
separated integer numerator and denominator.

A notable limitation in this sequence detection is that only sequences
with steps N/1 or 1/D are found, meaning that the step between elements
and the frequency with which it changes must be consistent across the
whole sequence. Fractional steps such as 2/3 won't be matched, as those
would involve more complex state tracking or some level of backtracking.

As it stands, however, this patch is sufficient to match common
interleave-type shuffle indices, for example matching `<0,0,1,1>` (or
commonly `<0,u,1,u>` or `<u,0,u,1>`) to an index sequence divided by 2.
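
As a rough standalone sketch, independent of the SelectionDAG plumbing,
the detection looks like this. The `matchVID` helper and its
`std::optional`-based element representation are invented for
illustration only; the real code walks BUILD_VECTOR constant operands
and additionally sign-extends differences to the element width.

  #include <cstdint>
  #include <optional>
  #include <utility>
  #include <vector>

  struct VIDSeq {
    int64_t StepNumerator;
    unsigned StepDenominator;
    int64_t Addend;
  };

  // Illustrative sketch only, not the in-tree interface. Match
  // Elts[i] == Addend + (i * N) / D, where either D == 1 (integer step N)
  // or N == +/-1 (fractional step 1/D). Undef elements are skipped.
  std::optional<VIDSeq>
  matchVID(const std::vector<std::optional<int64_t>> &Elts) {
    std::optional<int64_t> SeqStepNum, SeqAddend;
    std::optional<unsigned> SeqStepDenom;
    std::optional<std::pair<int64_t, unsigned>> PrevElt;
    for (unsigned Idx = 0; Idx < Elts.size(); ++Idx) {
      if (!Elts[Idx])
        continue;
      int64_t Val = *Elts[Idx];
      if (PrevElt) {
        unsigned IdxDiff = Idx - PrevElt->second;
        int64_t ValDiff = Val - PrevElt->first;
        // A zero value difference may just mean we're inside a fractional
        // step (e.g. the second 0 in <0,0,1,1>); wait for a value change.
        if (ValDiff != 0) {
          int64_t Remainder = ValDiff % (int64_t)IdxDiff;
          if (Remainder != ValDiff) {
            // Integer step: it must cleanly divide the index span.
            if (Remainder != 0)
              return std::nullopt;
            ValDiff /= (int64_t)IdxDiff;
            IdxDiff = 1;
          }
          if (!SeqStepNum)
            SeqStepNum = ValDiff;
          else if (ValDiff != *SeqStepNum)
            return std::nullopt;
          if (!SeqStepDenom)
            SeqStepDenom = IdxDiff;
          else if (IdxDiff != *SeqStepDenom)
            return std::nullopt;
        }
      }
      // Once a step is known, every non-undef element must agree on the
      // addend implied by that step.
      if (SeqStepNum && SeqStepDenom) {
        int64_t Expected =
            ((int64_t)Idx * *SeqStepNum) / (int64_t)*SeqStepDenom;
        int64_t Addend = Val - Expected;
        if (!SeqAddend)
          SeqAddend = Addend;
        else if (*SeqAddend != Addend)
          return std::nullopt;
      }
      // Only advance on a value change, so a later element measures the
      // full index span back to the start of the previous run of values.
      if (!PrevElt || PrevElt->first != Val)
        PrevElt = std::make_pair(Val, Idx);
    }
    if (!SeqStepNum || !SeqStepDenom || !SeqAddend)
      return std::nullopt;
    return VIDSeq{*SeqStepNum, *SeqStepDenom, *SeqAddend};
  }

With this sketch, {0, 0, 1, 1} comes back as step 1/2 with addend 0, and
{-5, -5, -5, -5, -6, -6, -6, -6} as step -1/4 with addend -5, matching
the new buildvec_vid_stepn1o4_addn5_v8i8 test below.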

While the optimization is relatively `undef`-tolerant, due to greedy
pattern-matching there are even some simple patterns which confuse the
sequence detection into identifying either a suboptimal sequence or no
sequence at all.
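
Running that same illustrative sketch over the index patterns exercised
by the new buildvec_vid_step1o2_v4i32 test (u denoting undef) shows
where the greedy matching goes astray:

  // Results shown as {numerator, denominator, addend}.
  // {1, 2, 0}: matched as VID/2, as intended.
  auto R0 = matchVID({0, std::nullopt, 1, std::nullopt});
  // {1, 3, 0}: greedily matched as VID/3 rather than VID/2, which the
  // power-of-two denominator restriction then rejects.
  auto R1 = matchVID({0, 0, std::nullopt, 1});
  // No match: 0 -> 1 over one index fixes the step at 1/1, and the
  // trailing 1 then fails the addend check.
  auto R2 = matchVID({std::nullopt, 0, 1, 1});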

Currently only fractional-step sequences identified as having a
power-of-two denominator are actually lowered to RVV instructions. This
is to avoid introducing divisions into the generated code.
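
Because the denominator is restricted to powers of two, the division in
(VID * N)/D + A lowers to a vsrl of the VID value, and a negative
numerator is handled by negating, which combines with the addend into a
single vrsub. The standalone program below, for illustration only, is a
scalar model of the vid.v/vsrl.vi/vrsub.vi sequence generated for the
new buildvec_vid_stepn1o4_addn5_v8i8 test (step -1/4, addend -5):

  #include <cstdint>
  #include <cstdio>

  int main() {
    const unsigned Log2Denom = 2; // StepDenominator == 4 (power of two)
    const int64_t Addend = -5;    // StepNumerator == -1 -> negate via vrsub
    for (uint64_t Idx = 0; Idx < 8; ++Idx) {
      uint64_t V = Idx >> Log2Denom;      // vid.v v25 ; vsrl.vi v25, v25, 2
      int64_t Lane = Addend - (int64_t)V; // vrsub.vi v25, v25, -5
      std::printf("%d ", (int)(int8_t)Lane);
    }
    std::printf("\n"); // prints: -5 -5 -5 -5 -6 -6 -6 -6
    return 0;
  }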

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D106533

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/rvv/common-shuffle-patterns.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
    llvm/test/CodeGen/RISCV/rvv/interleave-crash.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index e021f8da4460..96dd698a5c75 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1397,13 +1397,18 @@ static SDValue lowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG,
 }
 
 struct VIDSequence {
-  int64_t Step;
+  int64_t StepNumerator;
+  unsigned StepDenominator;
   int64_t Addend;
 };
 
 // Try to match an arithmetic-sequence BUILD_VECTOR [X,X+S,X+2*S,...,X+(N-1)*S]
 // to the (non-zero) step S and start value X. This can be then lowered as the
 // RVV sequence (VID * S) + X, for example.
+// The step S is represented as an integer numerator divided by a positive
+// denominator. Note that the implementation currently only identifies
+// sequences in which either the numerator is +/- 1 or the denominator is 1. It
+// cannot detect 2/3, for example.
 // Note that this method will also match potentially unappealing index
 // sequences, like <i32 0, i32 50939494>, however it is left to the caller to
 // determine whether this is worth generating code for.
@@ -1413,7 +1418,8 @@ static Optional<VIDSequence> isSimpleVIDSequence(SDValue Op) {
   if (!Op.getValueType().isInteger())
     return None;
 
-  Optional<int64_t> SeqStep, SeqAddend;
+  Optional<unsigned> SeqStepDenom;
+  Optional<int64_t> SeqStepNum, SeqAddend;
   Optional<std::pair<uint64_t, unsigned>> PrevElt;
   unsigned EltSizeInBits = Op.getValueType().getScalarSizeInBits();
   for (unsigned Idx = 0; Idx < NumElts; Idx++) {
@@ -1431,26 +1437,40 @@ static Optional<VIDSequence> isSimpleVIDSequence(SDValue Op) {
     if (PrevElt) {
       // Calculate the step since the last non-undef element, and ensure
       // it's consistent across the entire sequence.
-      int64_t Diff = SignExtend64(Val - PrevElt->first, EltSizeInBits);
-      // The difference must cleanly divide the element span.
-      if (Diff % (Idx - PrevElt->second) != 0)
-        return None;
-      int64_t Step = Diff / (Idx - PrevElt->second);
-      // A zero step indicates we're either a not an index sequence, or we
-      // have a fractional step. This must be handled by a more complex
-      // pattern recognition (undefs complicate things here).
-      if (Step == 0)
-        return None;
-      if (!SeqStep)
-        SeqStep = Step;
-      else if (Step != SeqStep)
-        return None;
+      unsigned IdxDiff = Idx - PrevElt->second;
+      int64_t ValDiff = SignExtend64(Val - PrevElt->first, EltSizeInBits);
+
+      // A zero-value value difference means that we're somewhere in the middle
+      // of a fractional step, e.g. <0,0,0*,0,1,1,1,1>. Wait until we notice a
+      // step change before evaluating the sequence.
+      if (ValDiff != 0) {
+        int64_t Remainder = ValDiff % IdxDiff;
+        // Normalize the step if it's greater than 1.
+        if (Remainder != ValDiff) {
+          // The difference must cleanly divide the element span.
+          if (Remainder != 0)
+            return None;
+          ValDiff /= IdxDiff;
+          IdxDiff = 1;
+        }
+
+        if (!SeqStepNum)
+          SeqStepNum = ValDiff;
+        else if (ValDiff != SeqStepNum)
+          return None;
+
+        if (!SeqStepDenom)
+          SeqStepDenom = IdxDiff;
+        else if (IdxDiff != *SeqStepDenom)
+          return None;
+      }
     }
 
     // Record and/or check any addend.
-    if (SeqStep) {
-      int64_t Addend =
-          SignExtend64(Val - (Idx * (uint64_t)*SeqStep), EltSizeInBits);
+    if (SeqStepNum && SeqStepDenom) {
+      uint64_t ExpectedVal =
+          (int64_t)(Idx * (uint64_t)*SeqStepNum) / *SeqStepDenom;
+      int64_t Addend = SignExtend64(Val - ExpectedVal, EltSizeInBits);
       if (!SeqAddend)
         SeqAddend = Addend;
       else if (SeqAddend != Addend)
@@ -1458,14 +1478,15 @@ static Optional<VIDSequence> isSimpleVIDSequence(SDValue Op) {
     }
 
     // Record this non-undef element for later.
-    PrevElt = std::make_pair(Val, Idx);
+    if (!PrevElt || PrevElt->first != Val)
+      PrevElt = std::make_pair(Val, Idx);
   }
   // We need to have logged both a step and an addend for this to count as
   // a legal index sequence.
-  if (!SeqStep || !SeqAddend)
+  if (!SeqStepNum || !SeqStepDenom || !SeqAddend)
     return None;
 
-  return VIDSequence{*SeqStep, *SeqAddend};
+  return VIDSequence{*SeqStepNum, *SeqStepDenom, *SeqAddend};
 }
 
 static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
@@ -1599,31 +1620,38 @@ static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
   // with optional modifications. An all-undef vector is matched by
   // getSplatValue, above.
   if (auto SimpleVID = isSimpleVIDSequence(Op)) {
-    int64_t Step = SimpleVID->Step;
+    int64_t StepNumerator = SimpleVID->StepNumerator;
+    unsigned StepDenominator = SimpleVID->StepDenominator;
     int64_t Addend = SimpleVID->Addend;
     // Only emit VIDs with suitably-small steps/addends. We use imm5 is a
     // threshold since it's the immediate value many RVV instructions accept.
-    if (isInt<5>(Step) && isInt<5>(Addend)) {
+    if (isInt<5>(StepNumerator) && isPowerOf2_32(StepDenominator) &&
+        isInt<5>(Addend)) {
       SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL);
       // Convert right out of the scalable type so we can use standard ISD
       // nodes for the rest of the computation. If we used scalable types with
       // these, we'd lose the fixed-length vector info and generate worse
       // vsetvli code.
       VID = convertFromScalableVector(VT, VID, DAG, Subtarget);
-      assert(Step != 0 && "Invalid step");
+      assert(StepNumerator != 0 && "Invalid step");
       bool Negate = false;
-      if (Step != 1) {
-        int64_t SplatStepVal = Step;
+      if (StepNumerator != 1) {
+        int64_t SplatStepVal = StepNumerator;
         unsigned Opcode = ISD::MUL;
-        if (isPowerOf2_64(std::abs(Step))) {
-          Negate = Step < 0;
+        if (isPowerOf2_64(std::abs(StepNumerator))) {
+          Negate = StepNumerator < 0;
           Opcode = ISD::SHL;
-          SplatStepVal = Log2_64(std::abs(Step));
+          SplatStepVal = Log2_64(std::abs(StepNumerator));
         }
         SDValue SplatStep = DAG.getSplatVector(
             VT, DL, DAG.getConstant(SplatStepVal, DL, XLenVT));
         VID = DAG.getNode(Opcode, DL, VT, VID, SplatStep);
       }
+      if (StepDenominator != 1) {
+        SDValue SplatStep = DAG.getSplatVector(
+            VT, DL, DAG.getConstant(Log2_64(StepDenominator), DL, XLenVT));
+        VID = DAG.getNode(ISD::SRL, DL, VT, VID, SplatStep);
+      }
       if (Addend != 0 || Negate) {
         SDValue SplatAddend =
             DAG.getSplatVector(VT, DL, DAG.getConstant(Addend, DL, XLenVT));

diff --git a/llvm/test/CodeGen/RISCV/rvv/common-shuffle-patterns.ll b/llvm/test/CodeGen/RISCV/rvv/common-shuffle-patterns.ll
index 7692f29bb545..27afe2cc6437 100644
--- a/llvm/test/CodeGen/RISCV/rvv/common-shuffle-patterns.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/common-shuffle-patterns.ll
@@ -7,39 +7,33 @@ target triple = "riscv64-unknown-unknown-elf"
 define dso_local <16 x i16> @interleave(<8 x i16> %v0, <8 x i16> %v1) {
 ; CHECK-LABEL: interleave:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8m2 def $v8m2
+; CHECK-NEXT:    vmv1r.v v26, v9
+; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8m2
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
-; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmv.v.i v28, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m2, tu, mu
-; CHECK-NEXT:    vmv2r.v v28, v26
-; CHECK-NEXT:    vslideup.vi v28, v8, 0
+; CHECK-NEXT:    vmv2r.v v30, v28
+; CHECK-NEXT:    vslideup.vi v30, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.v.i v30, 0
+; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, tu, mu
-; CHECK-NEXT:    vslideup.vi v28, v30, 8
-; CHECK-NEXT:    lui a0, %hi(.LCPI0_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI0_0)
+; CHECK-NEXT:    vslideup.vi v30, v8, 8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vle16.v v10, (a0)
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    vrgather.vv v12, v28, v10
+; CHECK-NEXT:    vid.v v10
+; CHECK-NEXT:    vsrl.vi v12, v10, 1
+; CHECK-NEXT:    vrgather.vv v14, v30, v12
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m2, tu, mu
-; CHECK-NEXT:    vslideup.vi v26, v8, 0
+; CHECK-NEXT:    vslideup.vi v28, v26, 0
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, tu, mu
-; CHECK-NEXT:    vslideup.vi v26, v30, 8
+; CHECK-NEXT:    vslideup.vi v28, v8, 8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vid.v v28
-; CHECK-NEXT:    vrgather.vv v8, v12, v28
+; CHECK-NEXT:    vrgather.vv v8, v14, v10
 ; CHECK-NEXT:    lui a0, 11
 ; CHECK-NEXT:    addiw a0, a0, -1366
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vmv.s.x v0, a0
-; CHECK-NEXT:    lui a0, %hi(.LCPI0_1)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI0_1)
-; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
-; CHECK-NEXT:    vle16.v v28, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, tu, mu
-; CHECK-NEXT:    vrgather.vv v8, v26, v28, v0.t
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, tu, mu
+; CHECK-NEXT:    vrgather.vv v8, v28, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %v2 = shufflevector <8 x i16> %v0, <8 x i16> poison, <16 x i32> <i32 0, i32 undef, i32 1, i32 undef, i32 2, i32 undef, i32 3, i32 undef, i32 4, i32 undef, i32 5, i32 undef, i32 6, i32 undef, i32 7, i32 undef>

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
index eec104a36ebe..f26ba6552462 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
@@ -552,3 +552,92 @@ define void @buildvec_seq_v4i16_v2i32(<4 x i16>* %x) {
   store <4 x i16> <i16 -127, i16 -1, i16 -127, i16 -1>, <4 x i16>* %x
   ret void
 }
+
+define void @buildvec_vid_step1o2_v4i32(<4 x i32>* %z0, <4 x i32>* %z1, <4 x i32>* %z2, <4 x i32>* %z3, <4 x i32>* %z4, <4 x i32>* %z5, <4 x i32>* %z6) {
+; CHECK-LABEL: buildvec_vid_step1o2_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vid.v v25
+; CHECK-NEXT:    vsrl.vi v25, v25, 1
+; CHECK-NEXT:    vse32.v v25, (a0)
+; CHECK-NEXT:    vse32.v v25, (a1)
+; CHECK-NEXT:    vse32.v v25, (a2)
+; CHECK-NEXT:    vse32.v v25, (a3)
+; CHECK-NEXT:    vse32.v v25, (a4)
+; CHECK-NEXT:    vmv.s.x v25, zero
+; CHECK-NEXT:    vmv.v.i v26, 1
+; CHECK-NEXT:    vsetivli zero, 2, e32, m1, tu, mu
+; CHECK-NEXT:    vslideup.vi v26, v25, 1
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vse32.v v26, (a5)
+; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    vmv.s.x v25, a0
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT:    vslideup.vi v26, v25, 3
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vse32.v v26, (a6)
+; CHECK-NEXT:    ret
+  store <4 x i32> <i32 0, i32 0, i32 1, i32 1>, <4 x i32>* %z0
+  store <4 x i32> <i32 0, i32 0, i32 1, i32 undef>, <4 x i32>* %z1
+  store <4 x i32> <i32 0, i32 undef, i32 1, i32 1>, <4 x i32>* %z2
+  store <4 x i32> <i32 undef, i32 0, i32 undef, i32 1>, <4 x i32>* %z3
+  store <4 x i32> <i32 0, i32 undef, i32 1, i32 undef>, <4 x i32>* %z4
+  ; We don't catch this one
+  store <4 x i32> <i32 undef, i32 0, i32 1, i32 1>, <4 x i32>* %z5
+  ; We catch this one but as VID/3 rather than VID/2
+  store <4 x i32> <i32 0, i32 0, i32 undef, i32 1>, <4 x i32>* %z6
+  ret void
+}
+
+define void @buildvec_vid_step1o2_add3_v4i16(<4 x i16>* %z0, <4 x i16>* %z1, <4 x i16>* %z2, <4 x i16>* %z3, <4 x i16>* %z4, <4 x i16>* %z5, <4 x i16>* %z6) {
+; CHECK-LABEL: buildvec_vid_step1o2_add3_v4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT:    vid.v v25
+; CHECK-NEXT:    vsrl.vi v25, v25, 1
+; CHECK-NEXT:    vadd.vi v25, v25, 3
+; CHECK-NEXT:    vse16.v v25, (a0)
+; CHECK-NEXT:    vse16.v v25, (a1)
+; CHECK-NEXT:    vse16.v v25, (a2)
+; CHECK-NEXT:    vse16.v v25, (a3)
+; CHECK-NEXT:    vse16.v v25, (a4)
+; CHECK-NEXT:    addi a0, zero, 3
+; CHECK-NEXT:    vmv.s.x v25, a0
+; CHECK-NEXT:    vmv.v.i v26, 4
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf2, tu, mu
+; CHECK-NEXT:    vslideup.vi v26, v25, 1
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT:    vse16.v v26, (a5)
+; CHECK-NEXT:    addi a0, zero, 4
+; CHECK-NEXT:    vmv.s.x v25, a0
+; CHECK-NEXT:    vmv.v.i v26, 3
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, tu, mu
+; CHECK-NEXT:    vslideup.vi v26, v25, 3
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; CHECK-NEXT:    vse16.v v26, (a6)
+; CHECK-NEXT:    ret
+  store <4 x i16> <i16 3, i16 3, i16 4, i16 4>, <4 x i16>* %z0
+  store <4 x i16> <i16 3, i16 3, i16 4, i16 undef>, <4 x i16>* %z1
+  store <4 x i16> <i16 3, i16 undef, i16 4, i16 4>, <4 x i16>* %z2
+  store <4 x i16> <i16 undef, i16 3, i16 undef, i16 4>, <4 x i16>* %z3
+  store <4 x i16> <i16 3, i16 undef, i16 4, i16 undef>, <4 x i16>* %z4
+  ; We don't catch this one
+  store <4 x i16> <i16 undef, i16 3, i16 4, i16 4>, <4 x i16>* %z5
+  ; We catch this one but as VID/3 rather than VID/2
+  store <4 x i16> <i16 3, i16 3, i16 undef, i16 4>, <4 x i16>* %z6
+  ret void
+}
+
+define void @buildvec_vid_stepn1o4_addn5_v8i8(<8 x i8>* %z0) {
+; CHECK-LABEL: buildvec_vid_stepn1o4_addn5_v8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT:    vid.v v25
+; CHECK-NEXT:    vsrl.vi v25, v25, 2
+; CHECK-NEXT:    vrsub.vi v25, v25, -5
+; CHECK-NEXT:    vse8.v v25, (a0)
+; CHECK-NEXT:    ret
+  store <8 x i8> <i8 -5, i8 -5, i8 -5, i8 -5, i8 -6, i8 -6, i8 -6, i8 -6>, <8 x i8>* %z0
+  ret void
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
index d6333a739a73..709265d6b914 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
@@ -325,17 +325,15 @@ define <4 x i8> @interleave_shuffles(<4 x i8> %x) {
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 1
-; CHECK-NEXT:    vmv.s.x v26, zero
-; CHECK-NEXT:    vmv.v.i v27, 1
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, tu, mu
-; CHECK-NEXT:    vslideup.vi v27, v26, 1
 ; CHECK-NEXT:    addi a1, zero, 10
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v0, a1
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
+; CHECK-NEXT:    vid.v v26
+; CHECK-NEXT:    vsrl.vi v26, v26, 1
 ; CHECK-NEXT:    vmv.v.x v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, tu, mu
-; CHECK-NEXT:    vrgather.vv v8, v25, v27, v0.t
+; CHECK-NEXT:    vrgather.vv v8, v25, v26, v0.t
 ; CHECK-NEXT:    ret
   %y = shufflevector <4 x i8> %x, <4 x i8> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
   %z = shufflevector <4 x i8> %x, <4 x i8> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
index 8501cdaefb40..39b6b704fa0a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
@@ -1223,25 +1223,23 @@ define void @mulhs_v2i64(<2 x i64>* %x) {
 ; RV32-NEXT:    vmv.s.x v26, a1
 ; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV32-NEXT:    vmulh.vv v26, v25, v26
-; RV32-NEXT:    addi a1, zero, 3
-; RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
-; RV32-NEXT:    vmv.s.x v0, a1
 ; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
-; RV32-NEXT:    vmv.v.i v27, -1
-; RV32-NEXT:    vmerge.vim v27, v27, 0, v0
+; RV32-NEXT:    vid.v v27
+; RV32-NEXT:    vsrl.vi v27, v27, 1
+; RV32-NEXT:    vrsub.vi v27, v27, 0
 ; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV32-NEXT:    vmadd.vv v27, v25, v26
-; RV32-NEXT:    addi a1, zero, 63
-; RV32-NEXT:    vsrl.vx v25, v27, a1
 ; RV32-NEXT:    addi a1, zero, 1
 ; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
-; RV32-NEXT:    vmv.s.x v26, a1
-; RV32-NEXT:    vmv.v.i v28, 0
+; RV32-NEXT:    vmv.s.x v25, a1
+; RV32-NEXT:    vmv.v.i v26, 0
 ; RV32-NEXT:    vsetivli zero, 3, e32, m1, tu, mu
-; RV32-NEXT:    vslideup.vi v28, v26, 2
+; RV32-NEXT:    vslideup.vi v26, v25, 2
 ; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
-; RV32-NEXT:    vsra.vv v26, v27, v28
-; RV32-NEXT:    vadd.vv v25, v26, v25
+; RV32-NEXT:    vsra.vv v25, v27, v26
+; RV32-NEXT:    addi a1, zero, 63
+; RV32-NEXT:    vsrl.vx v26, v27, a1
+; RV32-NEXT:    vadd.vv v25, v25, v26
 ; RV32-NEXT:    vse64.v v25, (a0)
 ; RV32-NEXT:    ret
 ;

diff --git a/llvm/test/CodeGen/RISCV/rvv/interleave-crash.ll b/llvm/test/CodeGen/RISCV/rvv/interleave-crash.ll
index 33fad2a6e67b..7f98d13e45fc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/interleave-crash.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/interleave-crash.ll
@@ -7,30 +7,28 @@ define void @interleave256(<256 x i16>* %agg.result, <128 x i16>* %0, <128 x i16
 ; RV64-1024:       # %bb.0: # %entry
 ; RV64-1024-NEXT:    addi a3, zero, 128
 ; RV64-1024-NEXT:    vsetvli zero, a3, e16, m2, ta, mu
-; RV64-1024-NEXT:    vle16.v v12, (a1)
-; RV64-1024-NEXT:    vle16.v v8, (a2)
+; RV64-1024-NEXT:    vle16.v v8, (a1)
+; RV64-1024-NEXT:    vle16.v v12, (a2)
 ; RV64-1024-NEXT:    addi a1, zero, 256
 ; RV64-1024-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; RV64-1024-NEXT:    vmv.v.i v28, 0
 ; RV64-1024-NEXT:    vsetvli zero, a3, e16, m4, tu, mu
 ; RV64-1024-NEXT:    vmv4r.v v16, v28
-; RV64-1024-NEXT:    vslideup.vi v16, v12, 0
+; RV64-1024-NEXT:    vslideup.vi v16, v8, 0
 ; RV64-1024-NEXT:    vsetvli zero, a3, e16, m2, ta, mu
-; RV64-1024-NEXT:    vmv.v.i v12, 0
+; RV64-1024-NEXT:    vmv.v.i v20, 0
 ; RV64-1024-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
-; RV64-1024-NEXT:    vslideup.vx v16, v12, a3
-; RV64-1024-NEXT:    lui a2, %hi(.LCPI0_0)
-; RV64-1024-NEXT:    addi a2, a2, %lo(.LCPI0_0)
+; RV64-1024-NEXT:    vslideup.vx v16, v20, a3
 ; RV64-1024-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV64-1024-NEXT:    vle16.v v20, (a2)
-; RV64-1024-NEXT:    vrgather.vv v24, v16, v20
+; RV64-1024-NEXT:    vid.v v24
+; RV64-1024-NEXT:    vsrl.vi v8, v24, 1
+; RV64-1024-NEXT:    vrgather.vv v0, v16, v8
 ; RV64-1024-NEXT:    vsetvli zero, a3, e16, m4, tu, mu
-; RV64-1024-NEXT:    vslideup.vi v28, v8, 0
+; RV64-1024-NEXT:    vslideup.vi v28, v12, 0
 ; RV64-1024-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
-; RV64-1024-NEXT:    vslideup.vx v28, v12, a3
+; RV64-1024-NEXT:    vslideup.vx v28, v20, a3
 ; RV64-1024-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV64-1024-NEXT:    vid.v v12
-; RV64-1024-NEXT:    vrgather.vv v8, v24, v12
+; RV64-1024-NEXT:    vrgather.vv v12, v0, v24
 ; RV64-1024-NEXT:    lui a2, 1026731
 ; RV64-1024-NEXT:    addiw a2, a2, -1365
 ; RV64-1024-NEXT:    slli a2, a2, 12
@@ -48,14 +46,10 @@ define void @interleave256(<256 x i16>* %agg.result, <128 x i16>* %0, <128 x i16
 ; RV64-1024-NEXT:    vslideup.vi v0, v25, 2
 ; RV64-1024-NEXT:    vsetivli zero, 4, e64, m1, tu, mu
 ; RV64-1024-NEXT:    vslideup.vi v0, v25, 3
-; RV64-1024-NEXT:    lui a2, %hi(.LCPI0_1)
-; RV64-1024-NEXT:    addi a2, a2, %lo(.LCPI0_1)
-; RV64-1024-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; RV64-1024-NEXT:    vle16.v v12, (a2)
-; RV64-1024-NEXT:    vsetvli zero, zero, e16, m4, tu, mu
-; RV64-1024-NEXT:    vrgather.vv v8, v28, v12, v0.t
+; RV64-1024-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
+; RV64-1024-NEXT:    vrgather.vv v12, v28, v8, v0.t
 ; RV64-1024-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV64-1024-NEXT:    vse16.v v8, (a0)
+; RV64-1024-NEXT:    vse16.v v12, (a0)
 ; RV64-1024-NEXT:    ret
 ;
 ; RV64-2048-LABEL: interleave256:
@@ -71,21 +65,19 @@ define void @interleave256(<256 x i16>* %agg.result, <128 x i16>* %0, <128 x i16
 ; RV64-2048-NEXT:    vmv2r.v v8, v26
 ; RV64-2048-NEXT:    vslideup.vi v8, v28, 0
 ; RV64-2048-NEXT:    vsetvli zero, a3, e16, m1, ta, mu
-; RV64-2048-NEXT:    vmv.v.i v28, 0
+; RV64-2048-NEXT:    vmv.v.i v10, 0
 ; RV64-2048-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
-; RV64-2048-NEXT:    vslideup.vx v8, v28, a3
-; RV64-2048-NEXT:    lui a2, %hi(.LCPI0_0)
-; RV64-2048-NEXT:    addi a2, a2, %lo(.LCPI0_0)
+; RV64-2048-NEXT:    vslideup.vx v8, v10, a3
 ; RV64-2048-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV64-2048-NEXT:    vle16.v v10, (a2)
-; RV64-2048-NEXT:    vrgather.vv v12, v8, v10
+; RV64-2048-NEXT:    vid.v v12
+; RV64-2048-NEXT:    vsrl.vi v28, v12, 1
+; RV64-2048-NEXT:    vrgather.vv v14, v8, v28
 ; RV64-2048-NEXT:    vsetvli zero, a3, e16, m2, tu, mu
 ; RV64-2048-NEXT:    vslideup.vi v26, v30, 0
 ; RV64-2048-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
-; RV64-2048-NEXT:    vslideup.vx v26, v28, a3
+; RV64-2048-NEXT:    vslideup.vx v26, v10, a3
 ; RV64-2048-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV64-2048-NEXT:    vid.v v28
-; RV64-2048-NEXT:    vrgather.vv v30, v12, v28
+; RV64-2048-NEXT:    vrgather.vv v30, v14, v12
 ; RV64-2048-NEXT:    lui a2, 1026731
 ; RV64-2048-NEXT:    addiw a2, a2, -1365
 ; RV64-2048-NEXT:    slli a2, a2, 12
@@ -103,11 +95,7 @@ define void @interleave256(<256 x i16>* %agg.result, <128 x i16>* %0, <128 x i16
 ; RV64-2048-NEXT:    vslideup.vi v0, v25, 2
 ; RV64-2048-NEXT:    vsetivli zero, 4, e64, m1, tu, mu
 ; RV64-2048-NEXT:    vslideup.vi v0, v25, 3
-; RV64-2048-NEXT:    lui a2, %hi(.LCPI0_1)
-; RV64-2048-NEXT:    addi a2, a2, %lo(.LCPI0_1)
-; RV64-2048-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; RV64-2048-NEXT:    vle16.v v28, (a2)
-; RV64-2048-NEXT:    vsetvli zero, zero, e16, m2, tu, mu
+; RV64-2048-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
 ; RV64-2048-NEXT:    vrgather.vv v30, v26, v28, v0.t
 ; RV64-2048-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; RV64-2048-NEXT:    vse16.v v30, (a0)
@@ -128,7 +116,7 @@ define void @interleave512(<512 x i16>* %agg.result, <256 x i16>* %0, <256 x i16
 ; RV64-1024-NEXT:    addi sp, sp, -16
 ; RV64-1024-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-1024-NEXT:    csrr a3, vlenb
-; RV64-1024-NEXT:    addi a4, zero, 24
+; RV64-1024-NEXT:    addi a4, zero, 40
 ; RV64-1024-NEXT:    mul a3, a3, a4
 ; RV64-1024-NEXT:    sub sp, sp, a3
 ; RV64-1024-NEXT:    addi a3, zero, 256
@@ -136,51 +124,65 @@ define void @interleave512(<512 x i16>* %agg.result, <256 x i16>* %0, <256 x i16
 ; RV64-1024-NEXT:    vle16.v v24, (a1)
 ; RV64-1024-NEXT:    vle16.v v8, (a2)
 ; RV64-1024-NEXT:    csrr a1, vlenb
-; RV64-1024-NEXT:    slli a1, a1, 4
+; RV64-1024-NEXT:    addi a2, zero, 24
+; RV64-1024-NEXT:    mul a1, a1, a2
 ; RV64-1024-NEXT:    add a1, sp, a1
 ; RV64-1024-NEXT:    addi a1, a1, 16
 ; RV64-1024-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
 ; RV64-1024-NEXT:    addi a1, zero, 512
 ; RV64-1024-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; RV64-1024-NEXT:    vmv.v.i v8, 0
+; RV64-1024-NEXT:    csrr a2, vlenb
+; RV64-1024-NEXT:    slli a2, a2, 4
+; RV64-1024-NEXT:    add a2, sp, a2
+; RV64-1024-NEXT:    addi a2, a2, 16
+; RV64-1024-NEXT:    vs8r.v v8, (a2) # Unknown-size Folded Spill
 ; RV64-1024-NEXT:    vsetvli zero, a3, e16, m8, tu, mu
-; RV64-1024-NEXT:    vmv8r.v v0, v8
-; RV64-1024-NEXT:    vslideup.vi v0, v24, 0
+; RV64-1024-NEXT:    vslideup.vi v8, v24, 0
 ; RV64-1024-NEXT:    vsetvli zero, a3, e16, m4, ta, mu
 ; RV64-1024-NEXT:    vmv.v.i v16, 0
 ; RV64-1024-NEXT:    addi a2, sp, 16
 ; RV64-1024-NEXT:    vs8r.v v16, (a2) # Unknown-size Folded Spill
 ; RV64-1024-NEXT:    vsetvli zero, a1, e16, m8, tu, mu
-; RV64-1024-NEXT:    vslideup.vx v0, v16, a3
-; RV64-1024-NEXT:    lui a2, %hi(.LCPI1_0)
-; RV64-1024-NEXT:    addi a2, a2, %lo(.LCPI1_0)
+; RV64-1024-NEXT:    vslideup.vx v8, v16, a3
 ; RV64-1024-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
-; RV64-1024-NEXT:    vle16.v v16, (a2)
-; RV64-1024-NEXT:    vrgather.vv v24, v0, v16
+; RV64-1024-NEXT:    vid.v v24
+; RV64-1024-NEXT:    vsrl.vi v16, v24, 1
+; RV64-1024-NEXT:    csrr a2, vlenb
+; RV64-1024-NEXT:    slli a2, a2, 5
+; RV64-1024-NEXT:    add a2, sp, a2
+; RV64-1024-NEXT:    addi a2, a2, 16
+; RV64-1024-NEXT:    vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV64-1024-NEXT:    vrgather.vv v0, v8, v16
 ; RV64-1024-NEXT:    csrr a2, vlenb
 ; RV64-1024-NEXT:    slli a2, a2, 3
 ; RV64-1024-NEXT:    add a2, sp, a2
 ; RV64-1024-NEXT:    addi a2, a2, 16
-; RV64-1024-NEXT:    vs8r.v v24, (a2) # Unknown-size Folded Spill
+; RV64-1024-NEXT:    vs8r.v v0, (a2) # Unknown-size Folded Spill
 ; RV64-1024-NEXT:    vsetvli zero, a3, e16, m8, tu, mu
 ; RV64-1024-NEXT:    csrr a2, vlenb
 ; RV64-1024-NEXT:    slli a2, a2, 4
 ; RV64-1024-NEXT:    add a2, sp, a2
 ; RV64-1024-NEXT:    addi a2, a2, 16
 ; RV64-1024-NEXT:    vl8re8.v v16, (a2) # Unknown-size Folded Reload
-; RV64-1024-NEXT:    vslideup.vi v8, v16, 0
+; RV64-1024-NEXT:    csrr a2, vlenb
+; RV64-1024-NEXT:    addi a4, zero, 24
+; RV64-1024-NEXT:    mul a2, a2, a4
+; RV64-1024-NEXT:    add a2, sp, a2
+; RV64-1024-NEXT:    addi a2, a2, 16
+; RV64-1024-NEXT:    vl8re8.v v8, (a2) # Unknown-size Folded Reload
+; RV64-1024-NEXT:    vslideup.vi v16, v8, 0
 ; RV64-1024-NEXT:    vsetvli zero, a1, e16, m8, tu, mu
 ; RV64-1024-NEXT:    addi a2, sp, 16
-; RV64-1024-NEXT:    vl8re8.v v16, (a2) # Unknown-size Folded Reload
-; RV64-1024-NEXT:    vslideup.vx v8, v16, a3
+; RV64-1024-NEXT:    vl8re8.v v8, (a2) # Unknown-size Folded Reload
+; RV64-1024-NEXT:    vslideup.vx v16, v8, a3
 ; RV64-1024-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
-; RV64-1024-NEXT:    vid.v v24
 ; RV64-1024-NEXT:    csrr a2, vlenb
 ; RV64-1024-NEXT:    slli a2, a2, 3
 ; RV64-1024-NEXT:    add a2, sp, a2
 ; RV64-1024-NEXT:    addi a2, a2, 16
 ; RV64-1024-NEXT:    vl8re8.v v0, (a2) # Unknown-size Folded Reload
-; RV64-1024-NEXT:    vrgather.vv v16, v0, v24
+; RV64-1024-NEXT:    vrgather.vv v8, v0, v24
 ; RV64-1024-NEXT:    lui a2, 1026731
 ; RV64-1024-NEXT:    addiw a2, a2, -1365
 ; RV64-1024-NEXT:    slli a2, a2, 12
@@ -206,16 +208,17 @@ define void @interleave512(<512 x i16>* %agg.result, <256 x i16>* %0, <256 x i16
 ; RV64-1024-NEXT:    vslideup.vi v0, v25, 6
 ; RV64-1024-NEXT:    vsetivli zero, 8, e64, m1, tu, mu
 ; RV64-1024-NEXT:    vslideup.vi v0, v25, 7
-; RV64-1024-NEXT:    lui a2, %hi(.LCPI1_1)
-; RV64-1024-NEXT:    addi a2, a2, %lo(.LCPI1_1)
-; RV64-1024-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; RV64-1024-NEXT:    vle16.v v24, (a2)
-; RV64-1024-NEXT:    vsetvli zero, zero, e16, m8, tu, mu
-; RV64-1024-NEXT:    vrgather.vv v16, v8, v24, v0.t
+; RV64-1024-NEXT:    vsetvli zero, a1, e16, m8, tu, mu
+; RV64-1024-NEXT:    csrr a1, vlenb
+; RV64-1024-NEXT:    slli a1, a1, 5
+; RV64-1024-NEXT:    add a1, sp, a1
+; RV64-1024-NEXT:    addi a1, a1, 16
+; RV64-1024-NEXT:    vl8re8.v v24, (a1) # Unknown-size Folded Reload
+; RV64-1024-NEXT:    vrgather.vv v8, v16, v24, v0.t
 ; RV64-1024-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
-; RV64-1024-NEXT:    vse16.v v16, (a0)
+; RV64-1024-NEXT:    vse16.v v8, (a0)
 ; RV64-1024-NEXT:    csrr a0, vlenb
-; RV64-1024-NEXT:    addi a1, zero, 24
+; RV64-1024-NEXT:    addi a1, zero, 40
 ; RV64-1024-NEXT:    mul a0, a0, a1
 ; RV64-1024-NEXT:    add sp, sp, a0
 ; RV64-1024-NEXT:    addi sp, sp, 16
@@ -225,30 +228,28 @@ define void @interleave512(<512 x i16>* %agg.result, <256 x i16>* %0, <256 x i16
 ; RV64-2048:       # %bb.0: # %entry
 ; RV64-2048-NEXT:    addi a3, zero, 256
 ; RV64-2048-NEXT:    vsetvli zero, a3, e16, m2, ta, mu
-; RV64-2048-NEXT:    vle16.v v12, (a1)
-; RV64-2048-NEXT:    vle16.v v8, (a2)
+; RV64-2048-NEXT:    vle16.v v8, (a1)
+; RV64-2048-NEXT:    vle16.v v12, (a2)
 ; RV64-2048-NEXT:    addi a1, zero, 512
 ; RV64-2048-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; RV64-2048-NEXT:    vmv.v.i v28, 0
 ; RV64-2048-NEXT:    vsetvli zero, a3, e16, m4, tu, mu
 ; RV64-2048-NEXT:    vmv4r.v v16, v28
-; RV64-2048-NEXT:    vslideup.vi v16, v12, 0
+; RV64-2048-NEXT:    vslideup.vi v16, v8, 0
 ; RV64-2048-NEXT:    vsetvli zero, a3, e16, m2, ta, mu
-; RV64-2048-NEXT:    vmv.v.i v12, 0
+; RV64-2048-NEXT:    vmv.v.i v20, 0
 ; RV64-2048-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
-; RV64-2048-NEXT:    vslideup.vx v16, v12, a3
-; RV64-2048-NEXT:    lui a2, %hi(.LCPI1_0)
-; RV64-2048-NEXT:    addi a2, a2, %lo(.LCPI1_0)
+; RV64-2048-NEXT:    vslideup.vx v16, v20, a3
 ; RV64-2048-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV64-2048-NEXT:    vle16.v v20, (a2)
-; RV64-2048-NEXT:    vrgather.vv v24, v16, v20
+; RV64-2048-NEXT:    vid.v v24
+; RV64-2048-NEXT:    vsrl.vi v8, v24, 1
+; RV64-2048-NEXT:    vrgather.vv v0, v16, v8
 ; RV64-2048-NEXT:    vsetvli zero, a3, e16, m4, tu, mu
-; RV64-2048-NEXT:    vslideup.vi v28, v8, 0
+; RV64-2048-NEXT:    vslideup.vi v28, v12, 0
 ; RV64-2048-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
-; RV64-2048-NEXT:    vslideup.vx v28, v12, a3
+; RV64-2048-NEXT:    vslideup.vx v28, v20, a3
 ; RV64-2048-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV64-2048-NEXT:    vid.v v12
-; RV64-2048-NEXT:    vrgather.vv v8, v24, v12
+; RV64-2048-NEXT:    vrgather.vv v12, v0, v24
 ; RV64-2048-NEXT:    lui a2, 1026731
 ; RV64-2048-NEXT:    addiw a2, a2, -1365
 ; RV64-2048-NEXT:    slli a2, a2, 12
@@ -274,14 +275,10 @@ define void @interleave512(<512 x i16>* %agg.result, <256 x i16>* %0, <256 x i16
 ; RV64-2048-NEXT:    vslideup.vi v0, v25, 6
 ; RV64-2048-NEXT:    vsetivli zero, 8, e64, m1, tu, mu
 ; RV64-2048-NEXT:    vslideup.vi v0, v25, 7
-; RV64-2048-NEXT:    lui a2, %hi(.LCPI1_1)
-; RV64-2048-NEXT:    addi a2, a2, %lo(.LCPI1_1)
-; RV64-2048-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; RV64-2048-NEXT:    vle16.v v12, (a2)
-; RV64-2048-NEXT:    vsetvli zero, zero, e16, m4, tu, mu
-; RV64-2048-NEXT:    vrgather.vv v8, v28, v12, v0.t
+; RV64-2048-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
+; RV64-2048-NEXT:    vrgather.vv v12, v28, v8, v0.t
 ; RV64-2048-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV64-2048-NEXT:    vse16.v v8, (a0)
+; RV64-2048-NEXT:    vse16.v v12, (a0)
 ; RV64-2048-NEXT:    ret
 entry:
   %ve = load <256 x i16>, <256 x i16>* %0, align 512


        

