[llvm] a61c4a0 - [RISCV][SelectionDAG] Lower shuffles as bitrotates with vror.vi when possible

Luke Lau via llvm-commits llvm-commits at lists.llvm.org
Wed Aug 30 03:01:56 PDT 2023


Author: Luke Lau
Date: 2023-08-30T11:01:47+01:00
New Revision: a61c4a0ef6f93fe21df5c40c5c45fc484d5e83e0

URL: https://github.com/llvm/llvm-project/commit/a61c4a0ef6f93fe21df5c40c5c45fc484d5e83e0
DIFF: https://github.com/llvm/llvm-project/commit/a61c4a0ef6f93fe21df5c40c5c45fc484d5e83e0.diff

LOG: [RISCV][SelectionDAG] Lower shuffles as bitrotates with vror.vi when possible

Given a shuffle mask like <3, 0, 1, 2, 7, 4, 5, 6> for v8i8, we can
reinterpret it as a shuffle of v2i32 where the two i32s are bit rotated, and
lower it as a vror.vi (if legal with zvbb enabled).
We also need to make sure that the larger element type is a valid SEW, hence
the tests for zve32x.
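
For illustration, a minimal sketch (not part of this patch; the wrapper
function is hypothetical) of how the new ShuffleVectorInst::isBitRotateMask
helper classifies the mask above:

  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  // For the v8i8 mask <3, 0, 1, 2, 7, 4, 5, 6>, the helper reports that
  // grouping every 4 bytes into an i32 and rotating each i32 left by 8 bits
  // reproduces the shuffle (the same operation vror.vi performs with
  // immediate 24, as in the new test shuffle_v8i8_as_i32_24).
  static void classifyExampleMask() {
    int Mask[] = {3, 0, 1, 2, 7, 4, 5, 6};
    unsigned NumSubElts, RotateAmt;
    bool IsRotate = ShuffleVectorInst::isBitRotateMask(
        Mask, /*EltSizeInBits=*/8, /*MinSubElts=*/2, /*MaxSubElts=*/8,
        NumSubElts, RotateAmt);
    // Here IsRotate == true, NumSubElts == 4 and RotateAmt == 8, i.e. a
    // rotate left by 8 bits of each i32 subgroup.
    (void)IsRotate;
  }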

X86 already did this, so I've extracted the logic for it and put it inside
ShuffleVectorInst so it could be reused by RISC-V. I originally tried to add
this as a generic combine in DAGCombiner.cpp, but it ended up causing worse
codegen on X86 and PPC.

Reviewed By: reames, pengfei

Differential Revision: https://reviews.llvm.org/D157417

Added: 
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-rotate.ll

Modified: 
    llvm/include/llvm/IR/Instructions.h
    llvm/lib/IR/Instructions.cpp
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/IR/Instructions.h b/llvm/include/llvm/IR/Instructions.h
index 61e9e099ed992c0..99e96c4cccc73d8 100644
--- a/llvm/include/llvm/IR/Instructions.h
+++ b/llvm/include/llvm/IR/Instructions.h
@@ -2444,6 +2444,21 @@ class ShuffleVectorInst : public Instruction {
     return isInterleaveMask(Mask, Factor, NumInputElts, StartIndexes);
   }
 
+  /// Checks if the shuffle is a bit rotation of the first operand across
+  /// multiple subelements, e.g:
+  ///
+  /// shuffle <8 x i8> %a, <8 x i8> poison, <8 x i32> <1, 0, 3, 2, 5, 4, 7, 6>
+  ///
+  /// could be expressed as
+  ///
+  /// rotl <4 x i16> %a, 8
+  ///
+  /// If it can be expressed as a rotation, returns the number of subelements to
+  /// group by in NumSubElts and the number of bits to rotate left in RotateAmt.
+  static bool isBitRotateMask(ArrayRef<int> Mask, unsigned EltSizeInBits,
+                              unsigned MinSubElts, unsigned MaxSubElts,
+                              unsigned &NumSubElts, unsigned &RotateAmt);
+
   // Methods for support type inquiry through isa, cast, and dyn_cast:
   static bool classof(const Instruction *I) {
     return I->getOpcode() == Instruction::ShuffleVector;

diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
index 31c3d30065f5f66..544581b3e1bf3cd 100644
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -2833,6 +2833,45 @@ bool ShuffleVectorInst::isInterleaveMask(
   return true;
 }
 
+/// Try to lower a vector shuffle as a bit rotation.
+///
+/// Look for a repeated rotation pattern in each sub group.
+/// Returns an element-wise left bit rotation amount or -1 if failed.
+static int matchShuffleAsBitRotate(ArrayRef<int> Mask, int NumSubElts) {
+  int NumElts = Mask.size();
+  assert((NumElts % NumSubElts) == 0 && "Illegal shuffle mask");
+
+  int RotateAmt = -1;
+  for (int i = 0; i != NumElts; i += NumSubElts) {
+    for (int j = 0; j != NumSubElts; ++j) {
+      int M = Mask[i + j];
+      if (M < 0)
+        continue;
+      if (M < i || M >= i + NumSubElts)
+        return -1;
+      int Offset = (NumSubElts - (M - (i + j))) % NumSubElts;
+      if (0 <= RotateAmt && Offset != RotateAmt)
+        return -1;
+      RotateAmt = Offset;
+    }
+  }
+  return RotateAmt;
+}
+
+bool ShuffleVectorInst::isBitRotateMask(
+    ArrayRef<int> Mask, unsigned EltSizeInBits, unsigned MinSubElts,
+    unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt) {
+  for (NumSubElts = MinSubElts; NumSubElts <= MaxSubElts; NumSubElts *= 2) {
+    int EltRotateAmt = matchShuffleAsBitRotate(Mask, NumSubElts);
+    if (EltRotateAmt < 0)
+      continue;
+    RotateAmt = EltRotateAmt * EltSizeInBits;
+    return true;
+  }
+
+  return false;
+}
+
 //===----------------------------------------------------------------------===//
 //                             InsertValueInst Class
 //===----------------------------------------------------------------------===//

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 6a21aca0700f2bc..7ebbdcde4bea561 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -4260,6 +4260,51 @@ static SDValue lowerBitreverseShuffle(ShuffleVectorSDNode *SVN,
   return Res;
 }
 
+// Given a shuffle mask like <3, 0, 1, 2, 7, 4, 5, 6> for v8i8, we can
+// reinterpret it as a shuffle of v2i32 where the two i32s are bit rotated, and
+// lower it as a vror.vi (if legal with zvbb enabled).
+static SDValue lowerVECTOR_SHUFFLEAsRotate(ShuffleVectorSDNode *SVN,
+                                           SelectionDAG &DAG,
+                                           const RISCVSubtarget &Subtarget) {
+  SDLoc DL(SVN);
+
+  EVT VT = SVN->getValueType(0);
+  unsigned NumElts = VT.getVectorNumElements();
+  unsigned EltSizeInBits = VT.getScalarSizeInBits();
+  unsigned NumSubElts, RotateAmt;
+  if (!ShuffleVectorInst::isBitRotateMask(SVN->getMask(), EltSizeInBits, 2,
+                                          NumElts, NumSubElts, RotateAmt))
+    return SDValue();
+  MVT RotateVT = MVT::getVectorVT(MVT::getIntegerVT(EltSizeInBits * NumSubElts),
+                                  NumElts / NumSubElts);
+
+  // We might have a RotateVT that isn't legal, e.g. v4i64 on zve32x.
+  if (!Subtarget.getTargetLowering()->isOperationLegalOrCustom(ISD::ROTL,
+                                                               RotateVT))
+    return SDValue();
+
+  // If we just create the shift amount with
+  //
+  // DAG.getConstant(RotateAmt, DL, RotateVT)
+  //
+  // then for e64 we get a weird bitcasted build_vector on RV32 that we're
+  // unable to detect as a splat during pattern matching. So directly lower it
+  // to a vmv.v.x which gets matched to vror.vi.
+  MVT ContainerVT = getContainerForFixedLengthVector(DAG, RotateVT, Subtarget);
+  SDValue VL =
+      getDefaultVLOps(RotateVT, ContainerVT, DL, DAG, Subtarget).second;
+  SDValue RotateAmtSplat = DAG.getNode(
+      RISCVISD::VMV_V_X_VL, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
+      DAG.getConstant(RotateAmt, DL, Subtarget.getXLenVT()), VL);
+  RotateAmtSplat =
+      convertFromScalableVector(RotateVT, RotateAmtSplat, DAG, Subtarget);
+
+  SDValue Rotate =
+      DAG.getNode(ISD::ROTL, DL, RotateVT,
+                  DAG.getBitcast(RotateVT, SVN->getOperand(0)), RotateAmtSplat);
+  return DAG.getBitcast(VT, Rotate);
+}
+
 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
                                    const RISCVSubtarget &Subtarget) {
   SDValue V1 = Op.getOperand(0);
@@ -4270,6 +4315,11 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
   unsigned NumElts = VT.getVectorNumElements();
   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
 
+  // Lower to a vror.vi of a larger element type if possible. Do this before we
+  // promote i1s to i8s.
+  if (SDValue V = lowerVECTOR_SHUFFLEAsRotate(SVN, DAG, Subtarget))
+    return V;
+
   if (VT.getVectorElementType() == MVT::i1) {
     if (SDValue V = lowerBitreverseShuffle(SVN, DAG, Subtarget))
       return V;

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 0f6f90c7907cf2f..b0da91f4ada0ca2 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -10984,31 +10984,6 @@ static SDValue lowerShuffleAsDecomposedShuffleMerge(
   return DAG.getVectorShuffle(VT, DL, V1, V2, FinalMask);
 }
 
-/// Try to lower a vector shuffle as a bit rotation.
-///
-/// Look for a repeated rotation pattern in each sub group.
-/// Returns a ISD::ROTL element rotation amount or -1 if failed.
-static int matchShuffleAsBitRotate(ArrayRef<int> Mask, int NumSubElts) {
-  int NumElts = Mask.size();
-  assert((NumElts % NumSubElts) == 0 && "Illegal shuffle mask");
-
-  int RotateAmt = -1;
-  for (int i = 0; i != NumElts; i += NumSubElts) {
-    for (int j = 0; j != NumSubElts; ++j) {
-      int M = Mask[i + j];
-      if (M < 0)
-        continue;
-      if (!isInRange(M, i, i + NumSubElts))
-        return -1;
-      int Offset = (NumSubElts - (M - (i + j))) % NumSubElts;
-      if (0 <= RotateAmt && Offset != RotateAmt)
-        return -1;
-      RotateAmt = Offset;
-    }
-  }
-  return RotateAmt;
-}
-
 static int matchShuffleAsBitRotate(MVT &RotateVT, int EltSizeInBits,
                                    const X86Subtarget &Subtarget,
                                    ArrayRef<int> Mask) {
@@ -11018,18 +10993,14 @@ static int matchShuffleAsBitRotate(MVT &RotateVT, int EltSizeInBits,
   // AVX512 only has vXi32/vXi64 rotates, so limit the rotation sub group size.
   int MinSubElts = Subtarget.hasAVX512() ? std::max(32 / EltSizeInBits, 2) : 2;
   int MaxSubElts = 64 / EltSizeInBits;
-  for (int NumSubElts = MinSubElts; NumSubElts <= MaxSubElts; NumSubElts *= 2) {
-    int RotateAmt = matchShuffleAsBitRotate(Mask, NumSubElts);
-    if (RotateAmt < 0)
-      continue;
-
-    int NumElts = Mask.size();
-    MVT RotateSVT = MVT::getIntegerVT(EltSizeInBits * NumSubElts);
-    RotateVT = MVT::getVectorVT(RotateSVT, NumElts / NumSubElts);
-    return RotateAmt * EltSizeInBits;
-  }
-
-  return -1;
+  unsigned RotateAmt, NumSubElts;
+  if (!ShuffleVectorInst::isBitRotateMask(Mask, EltSizeInBits, MinSubElts,
+                                          MaxSubElts, NumSubElts, RotateAmt))
+    return -1;
+  unsigned NumElts = Mask.size();
+  MVT RotateSVT = MVT::getIntegerVT(EltSizeInBits * NumSubElts);
+  RotateVT = MVT::getVectorVT(RotateSVT, NumElts / NumSubElts);
+  return RotateAmt;
 }
 
 /// Lower shuffle using X86ISD::VROTLI rotations.

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll
index 6667a4969a75d04..2e3b8fb82f1920f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll
@@ -169,13 +169,19 @@ define <1 x i8> @reverse_v1i8(<1 x i8> %a) {
 }
 
 define <2 x i8> @reverse_v2i8(<2 x i8> %a) {
-; CHECK-LABEL: reverse_v2i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
-; CHECK-NEXT:    vslidedown.vi v9, v8, 1
-; CHECK-NEXT:    vslideup.vi v9, v8, 1
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
+; NO-ZVBB-LABEL: reverse_v2i8:
+; NO-ZVBB:       # %bb.0:
+; NO-ZVBB-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
+; NO-ZVBB-NEXT:    vslidedown.vi v9, v8, 1
+; NO-ZVBB-NEXT:    vslideup.vi v9, v8, 1
+; NO-ZVBB-NEXT:    vmv1r.v v8, v9
+; NO-ZVBB-NEXT:    ret
+;
+; ZVBB-LABEL: reverse_v2i8:
+; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; ZVBB-NEXT:    vror.vi v8, v8, 8
+; ZVBB-NEXT:    ret
   %res = call <2 x i8> @llvm.experimental.vector.reverse.v2i8(<2 x i8> %a)
   ret <2 x i8> %res
 }
@@ -258,13 +264,19 @@ define <1 x i16> @reverse_v1i16(<1 x i16> %a) {
 }
 
 define <2 x i16> @reverse_v2i16(<2 x i16> %a) {
-; CHECK-LABEL: reverse_v2i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v9, v8, 1
-; CHECK-NEXT:    vslideup.vi v9, v8, 1
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
+; NO-ZVBB-LABEL: reverse_v2i16:
+; NO-ZVBB:       # %bb.0:
+; NO-ZVBB-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; NO-ZVBB-NEXT:    vslidedown.vi v9, v8, 1
+; NO-ZVBB-NEXT:    vslideup.vi v9, v8, 1
+; NO-ZVBB-NEXT:    vmv1r.v v8, v9
+; NO-ZVBB-NEXT:    ret
+;
+; ZVBB-LABEL: reverse_v2i16:
+; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; ZVBB-NEXT:    vror.vi v8, v8, 16
+; ZVBB-NEXT:    ret
   %res = call <2 x i16> @llvm.experimental.vector.reverse.v2i16(<2 x i16> %a)
   ret <2 x i16> %res
 }
@@ -332,13 +344,19 @@ define <1 x i32> @reverse_v1i32(<1 x i32> %a) {
 }
 
 define <2 x i32> @reverse_v2i32(<2 x i32> %a) {
-; CHECK-LABEL: reverse_v2i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v9, v8, 1
-; CHECK-NEXT:    vslideup.vi v9, v8, 1
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
+; NO-ZVBB-LABEL: reverse_v2i32:
+; NO-ZVBB:       # %bb.0:
+; NO-ZVBB-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; NO-ZVBB-NEXT:    vslidedown.vi v9, v8, 1
+; NO-ZVBB-NEXT:    vslideup.vi v9, v8, 1
+; NO-ZVBB-NEXT:    vmv1r.v v8, v9
+; NO-ZVBB-NEXT:    ret
+;
+; ZVBB-LABEL: reverse_v2i32:
+; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; ZVBB-NEXT:    vror.vi v8, v8, 32
+; ZVBB-NEXT:    ret
   %res = call <2 x i32> @llvm.experimental.vector.reverse.v2i32(<2 x i32> %a)
   ret <2 x i32> %res
 }
@@ -572,13 +590,19 @@ define <1 x half> @reverse_v1f16(<1 x half> %a) {
 }
 
 define <2 x half> @reverse_v2f16(<2 x half> %a) {
-; CHECK-LABEL: reverse_v2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v9, v8, 1
-; CHECK-NEXT:    vslideup.vi v9, v8, 1
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
+; NO-ZVBB-LABEL: reverse_v2f16:
+; NO-ZVBB:       # %bb.0:
+; NO-ZVBB-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; NO-ZVBB-NEXT:    vslidedown.vi v9, v8, 1
+; NO-ZVBB-NEXT:    vslideup.vi v9, v8, 1
+; NO-ZVBB-NEXT:    vmv1r.v v8, v9
+; NO-ZVBB-NEXT:    ret
+;
+; ZVBB-LABEL: reverse_v2f16:
+; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; ZVBB-NEXT:    vror.vi v8, v8, 16
+; ZVBB-NEXT:    ret
   %res = call <2 x half> @llvm.experimental.vector.reverse.v2f16(<2 x half> %a)
   ret <2 x half> %res
 }
@@ -646,13 +670,19 @@ define <1 x float> @reverse_v1f32(<1 x float> %a) {
 }
 
 define <2 x float> @reverse_v2f32(<2 x float> %a) {
-; CHECK-LABEL: reverse_v2f32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v9, v8, 1
-; CHECK-NEXT:    vslideup.vi v9, v8, 1
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
+; NO-ZVBB-LABEL: reverse_v2f32:
+; NO-ZVBB:       # %bb.0:
+; NO-ZVBB-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; NO-ZVBB-NEXT:    vslidedown.vi v9, v8, 1
+; NO-ZVBB-NEXT:    vslideup.vi v9, v8, 1
+; NO-ZVBB-NEXT:    vmv1r.v v8, v9
+; NO-ZVBB-NEXT:    ret
+;
+; ZVBB-LABEL: reverse_v2f32:
+; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; ZVBB-NEXT:    vror.vi v8, v8, 32
+; ZVBB-NEXT:    ret
   %res = call <2 x float> @llvm.experimental.vector.reverse.v2f32(<2 x float> %a)
   ret <2 x float> %res
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-rotate.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-rotate.ll
new file mode 100644
index 000000000000000..327c3c23ebb2686
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-rotate.ll
@@ -0,0 +1,767 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -mtriple=riscv32 -mattr=+v,+zvfh -verify-machineinstrs < %s | FileCheck %s -check-prefixes=CHECK
+; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfh -verify-machineinstrs < %s | FileCheck %s -check-prefixes=CHECK
+; RUN: llc -mtriple=riscv32 -mattr=+v,+zvfh,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s -check-prefixes=ZVBB-V
+; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfh,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s -check-prefixes=ZVBB-V
+; RUN: llc -mtriple=riscv32 -mattr=+zve32x,+zvfh,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s -check-prefixes=ZVBB-ZVE32X
+; RUN: llc -mtriple=riscv64 -mattr=+zve32x,+zvfh,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s -check-prefixes=ZVBB-ZVE32X
+
+define <8 x i1> @shuffle_v8i1_as_i8_1(<8 x i1> %v) {
+; CHECK-LABEL: shuffle_v8i1_as_i8_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT:    vslidedown.vi v9, v8, 1
+; CHECK-NEXT:    vslideup.vi v9, v8, 7
+; CHECK-NEXT:    vmsne.vi v0, v9, 0
+; CHECK-NEXT:    ret
+;
+; ZVBB-V-LABEL: shuffle_v8i1_as_i8_1:
+; ZVBB-V:       # %bb.0:
+; ZVBB-V-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
+; ZVBB-V-NEXT:    vror.vi v0, v0, 1
+; ZVBB-V-NEXT:    ret
+;
+; ZVBB-ZVE32X-LABEL: shuffle_v8i1_as_i8_1:
+; ZVBB-ZVE32X:       # %bb.0:
+; ZVBB-ZVE32X-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
+; ZVBB-ZVE32X-NEXT:    vror.vi v0, v0, 1
+; ZVBB-ZVE32X-NEXT:    ret
+  %shuffle = shufflevector <8 x i1> %v, <8 x i1> poison, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0>
+  ret <8 x i1> %shuffle
+}
+
+define <8 x i1> @shuffle_v8i1_as_i8_2(<8 x i1> %v) {
+; CHECK-LABEL: shuffle_v8i1_as_i8_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT:    vslidedown.vi v9, v8, 2
+; CHECK-NEXT:    vslideup.vi v9, v8, 6
+; CHECK-NEXT:    vmsne.vi v0, v9, 0
+; CHECK-NEXT:    ret
+;
+; ZVBB-V-LABEL: shuffle_v8i1_as_i8_2:
+; ZVBB-V:       # %bb.0:
+; ZVBB-V-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
+; ZVBB-V-NEXT:    vror.vi v0, v0, 2
+; ZVBB-V-NEXT:    ret
+;
+; ZVBB-ZVE32X-LABEL: shuffle_v8i1_as_i8_2:
+; ZVBB-ZVE32X:       # %bb.0:
+; ZVBB-ZVE32X-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
+; ZVBB-ZVE32X-NEXT:    vror.vi v0, v0, 2
+; ZVBB-ZVE32X-NEXT:    ret
+  %shuffle = shufflevector <8 x i1> %v, <8 x i1> poison, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1>
+  ret <8 x i1> %shuffle
+}
+
+define <8 x i1> @shuffle_v8i1_as_i8_3(<8 x i1> %v) {
+; CHECK-LABEL: shuffle_v8i1_as_i8_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT:    vslidedown.vi v9, v8, 3
+; CHECK-NEXT:    vslideup.vi v9, v8, 5
+; CHECK-NEXT:    vmsne.vi v0, v9, 0
+; CHECK-NEXT:    ret
+;
+; ZVBB-V-LABEL: shuffle_v8i1_as_i8_3:
+; ZVBB-V:       # %bb.0:
+; ZVBB-V-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
+; ZVBB-V-NEXT:    vror.vi v0, v0, 3
+; ZVBB-V-NEXT:    ret
+;
+; ZVBB-ZVE32X-LABEL: shuffle_v8i1_as_i8_3:
+; ZVBB-ZVE32X:       # %bb.0:
+; ZVBB-ZVE32X-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
+; ZVBB-ZVE32X-NEXT:    vror.vi v0, v0, 3
+; ZVBB-ZVE32X-NEXT:    ret
+  %shuffle = shufflevector <8 x i1> %v, <8 x i1> poison, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2>
+  ret <8 x i1> %shuffle
+}
+
+define <8 x i1> @shuffle_v8i1_as_i8_4(<8 x i1> %v) {
+; CHECK-LABEL: shuffle_v8i1_as_i8_4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT:    vslidedown.vi v9, v8, 4
+; CHECK-NEXT:    vslideup.vi v9, v8, 4
+; CHECK-NEXT:    vmsne.vi v0, v9, 0
+; CHECK-NEXT:    ret
+;
+; ZVBB-V-LABEL: shuffle_v8i1_as_i8_4:
+; ZVBB-V:       # %bb.0:
+; ZVBB-V-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
+; ZVBB-V-NEXT:    vror.vi v0, v0, 4
+; ZVBB-V-NEXT:    ret
+;
+; ZVBB-ZVE32X-LABEL: shuffle_v8i1_as_i8_4:
+; ZVBB-ZVE32X:       # %bb.0:
+; ZVBB-ZVE32X-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
+; ZVBB-ZVE32X-NEXT:    vror.vi v0, v0, 4
+; ZVBB-ZVE32X-NEXT:    ret
+  %shuffle = shufflevector <8 x i1> %v, <8 x i1> poison, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3>
+  ret <8 x i1> %shuffle
+}
+
+define <8 x i1> @shuffle_v8i1_as_i8_5(<8 x i1> %v) {
+; CHECK-LABEL: shuffle_v8i1_as_i8_5:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT:    vslidedown.vi v9, v8, 5
+; CHECK-NEXT:    vslideup.vi v9, v8, 3
+; CHECK-NEXT:    vmsne.vi v0, v9, 0
+; CHECK-NEXT:    ret
+;
+; ZVBB-V-LABEL: shuffle_v8i1_as_i8_5:
+; ZVBB-V:       # %bb.0:
+; ZVBB-V-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
+; ZVBB-V-NEXT:    vror.vi v0, v0, 5
+; ZVBB-V-NEXT:    ret
+;
+; ZVBB-ZVE32X-LABEL: shuffle_v8i1_as_i8_5:
+; ZVBB-ZVE32X:       # %bb.0:
+; ZVBB-ZVE32X-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
+; ZVBB-ZVE32X-NEXT:    vror.vi v0, v0, 5
+; ZVBB-ZVE32X-NEXT:    ret
+  %shuffle = shufflevector <8 x i1> %v, <8 x i1> poison, <8 x i32> <i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4>
+  ret <8 x i1> %shuffle
+}
+
+define <8 x i1> @shuffle_v8i1_as_i8_6(<8 x i1> %v) {
+; CHECK-LABEL: shuffle_v8i1_as_i8_6:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT:    vslidedown.vi v9, v8, 6
+; CHECK-NEXT:    vslideup.vi v9, v8, 2
+; CHECK-NEXT:    vmsne.vi v0, v9, 0
+; CHECK-NEXT:    ret
+;
+; ZVBB-V-LABEL: shuffle_v8i1_as_i8_6:
+; ZVBB-V:       # %bb.0:
+; ZVBB-V-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
+; ZVBB-V-NEXT:    vror.vi v0, v0, 6
+; ZVBB-V-NEXT:    ret
+;
+; ZVBB-ZVE32X-LABEL: shuffle_v8i1_as_i8_6:
+; ZVBB-ZVE32X:       # %bb.0:
+; ZVBB-ZVE32X-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
+; ZVBB-ZVE32X-NEXT:    vror.vi v0, v0, 6
+; ZVBB-ZVE32X-NEXT:    ret
+  %shuffle = shufflevector <8 x i1> %v, <8 x i1> poison, <8 x i32> <i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5>
+  ret <8 x i1> %shuffle
+}
+
+define <8 x i1> @shuffle_v8i1_as_i8_7(<8 x i1> %v) {
+; CHECK-LABEL: shuffle_v8i1_as_i8_7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT:    vslidedown.vi v9, v8, 7
+; CHECK-NEXT:    vslideup.vi v9, v8, 1
+; CHECK-NEXT:    vmsne.vi v0, v9, 0
+; CHECK-NEXT:    ret
+;
+; ZVBB-V-LABEL: shuffle_v8i1_as_i8_7:
+; ZVBB-V:       # %bb.0:
+; ZVBB-V-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
+; ZVBB-V-NEXT:    vror.vi v0, v0, 7
+; ZVBB-V-NEXT:    ret
+;
+; ZVBB-ZVE32X-LABEL: shuffle_v8i1_as_i8_7:
+; ZVBB-ZVE32X:       # %bb.0:
+; ZVBB-ZVE32X-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
+; ZVBB-ZVE32X-NEXT:    vror.vi v0, v0, 7
+; ZVBB-ZVE32X-NEXT:    ret
+  %shuffle = shufflevector <8 x i1> %v, <8 x i1> poison, <8 x i32> <i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6>
+  ret <8 x i1> %shuffle
+}
+
+define <8 x i8> @shuffle_v8i8_as_i16(<8 x i8> %v) {
+; CHECK-LABEL: shuffle_v8i8_as_i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI7_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI7_0)
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vle8.v v10, (a0)
+; CHECK-NEXT:    vrgather.vv v9, v8, v10
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+;
+; ZVBB-V-LABEL: shuffle_v8i8_as_i16:
+; ZVBB-V:       # %bb.0:
+; ZVBB-V-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVBB-V-NEXT:    vror.vi v8, v8, 8
+; ZVBB-V-NEXT:    ret
+;
+; ZVBB-ZVE32X-LABEL: shuffle_v8i8_as_i16:
+; ZVBB-ZVE32X:       # %bb.0:
+; ZVBB-ZVE32X-NEXT:    vsetivli zero, 4, e16, m2, ta, ma
+; ZVBB-ZVE32X-NEXT:    vror.vi v8, v8, 8
+; ZVBB-ZVE32X-NEXT:    ret
+  %shuffle = shufflevector <8 x i8> %v, <8 x i8> poison, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
+  ret <8 x i8> %shuffle
+}
+
+define <8 x i8> @shuffle_v8i8_as_i32_8(<8 x i8> %v) {
+; CHECK-LABEL: shuffle_v8i8_as_i32_8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI8_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI8_0)
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vle8.v v10, (a0)
+; CHECK-NEXT:    vrgather.vv v9, v8, v10
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+;
+; ZVBB-V-LABEL: shuffle_v8i8_as_i32_8:
+; ZVBB-V:       # %bb.0:
+; ZVBB-V-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; ZVBB-V-NEXT:    vror.vi v8, v8, 8
+; ZVBB-V-NEXT:    ret
+;
+; ZVBB-ZVE32X-LABEL: shuffle_v8i8_as_i32_8:
+; ZVBB-ZVE32X:       # %bb.0:
+; ZVBB-ZVE32X-NEXT:    vsetivli zero, 2, e32, m2, ta, ma
+; ZVBB-ZVE32X-NEXT:    vror.vi v8, v8, 8
+; ZVBB-ZVE32X-NEXT:    ret
+  %shuffle = shufflevector <8 x i8> %v, <8 x i8> poison, <8 x i32> <i32 1, i32 2, i32 3, i32 0, i32 5, i32 6, i32 7, i32 4>
+  ret <8 x i8> %shuffle
+}
+
+define <8 x i8> @shuffle_v8i8_as_i32_16(<8 x i8> %v) {
+; CHECK-LABEL: shuffle_v8i8_as_i32_16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI9_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI9_0)
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vle8.v v10, (a0)
+; CHECK-NEXT:    vrgather.vv v9, v8, v10
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+;
+; ZVBB-V-LABEL: shuffle_v8i8_as_i32_16:
+; ZVBB-V:       # %bb.0:
+; ZVBB-V-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; ZVBB-V-NEXT:    vror.vi v8, v8, 16
+; ZVBB-V-NEXT:    ret
+;
+; ZVBB-ZVE32X-LABEL: shuffle_v8i8_as_i32_16:
+; ZVBB-ZVE32X:       # %bb.0:
+; ZVBB-ZVE32X-NEXT:    vsetivli zero, 2, e32, m2, ta, ma
+; ZVBB-ZVE32X-NEXT:    vror.vi v8, v8, 16
+; ZVBB-ZVE32X-NEXT:    ret
+  %shuffle = shufflevector <8 x i8> %v, <8 x i8> poison, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 6, i32 7, i32 4, i32 5>
+  ret <8 x i8> %shuffle
+}
+
+define <8 x i8> @shuffle_v8i8_as_i32_24(<8 x i8> %v) {
+; CHECK-LABEL: shuffle_v8i8_as_i32_24:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI10_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI10_0)
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vle8.v v10, (a0)
+; CHECK-NEXT:    vrgather.vv v9, v8, v10
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+;
+; ZVBB-V-LABEL: shuffle_v8i8_as_i32_24:
+; ZVBB-V:       # %bb.0:
+; ZVBB-V-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; ZVBB-V-NEXT:    vror.vi v8, v8, 24
+; ZVBB-V-NEXT:    ret
+;
+; ZVBB-ZVE32X-LABEL: shuffle_v8i8_as_i32_24:
+; ZVBB-ZVE32X:       # %bb.0:
+; ZVBB-ZVE32X-NEXT:    vsetivli zero, 2, e32, m2, ta, ma
+; ZVBB-ZVE32X-NEXT:    vror.vi v8, v8, 24
+; ZVBB-ZVE32X-NEXT:    ret
+  %shuffle = shufflevector <8 x i8> %v, <8 x i8> poison, <8 x i32> <i32 3, i32 0, i32 1, i32 2, i32 7, i32 4, i32 5, i32 6>
+  ret <8 x i8> %shuffle
+}
+
+define <8 x i8> @shuffle_v8i8_as_i64_8(<8 x i8> %v) {
+; CHECK-LABEL: shuffle_v8i8_as_i64_8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 1
+; CHECK-NEXT:    vslideup.vi v9, v8, 7
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+;
+; ZVBB-V-LABEL: shuffle_v8i8_as_i64_8:
+; ZVBB-V:       # %bb.0:
+; ZVBB-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; ZVBB-V-NEXT:    vror.vi v8, v8, 8
+; ZVBB-V-NEXT:    ret
+;
+; ZVBB-ZVE32X-LABEL: shuffle_v8i8_as_i64_8:
+; ZVBB-ZVE32X:       # %bb.0:
+; ZVBB-ZVE32X-NEXT:    vsetivli zero, 8, e8, m2, ta, ma
+; ZVBB-ZVE32X-NEXT:    vslidedown.vi v10, v8, 1
+; ZVBB-ZVE32X-NEXT:    vslideup.vi v10, v8, 7
+; ZVBB-ZVE32X-NEXT:    vmv.v.v v8, v10
+; ZVBB-ZVE32X-NEXT:    ret
+  %shuffle = shufflevector <8 x i8> %v, <8 x i8> poison, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0>
+  ret <8 x i8> %shuffle
+}
+
+define <8 x i8> @shuffle_v8i8_as_i64_16(<8 x i8> %v) {
+; CHECK-LABEL: shuffle_v8i8_as_i64_16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 2
+; CHECK-NEXT:    vslideup.vi v9, v8, 6
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+;
+; ZVBB-V-LABEL: shuffle_v8i8_as_i64_16:
+; ZVBB-V:       # %bb.0:
+; ZVBB-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; ZVBB-V-NEXT:    vror.vi v8, v8, 16
+; ZVBB-V-NEXT:    ret
+;
+; ZVBB-ZVE32X-LABEL: shuffle_v8i8_as_i64_16:
+; ZVBB-ZVE32X:       # %bb.0:
+; ZVBB-ZVE32X-NEXT:    vsetivli zero, 8, e8, m2, ta, ma
+; ZVBB-ZVE32X-NEXT:    vslidedown.vi v10, v8, 2
+; ZVBB-ZVE32X-NEXT:    vslideup.vi v10, v8, 6
+; ZVBB-ZVE32X-NEXT:    vmv.v.v v8, v10
+; ZVBB-ZVE32X-NEXT:    ret
+  %shuffle = shufflevector <8 x i8> %v, <8 x i8> poison, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1>
+  ret <8 x i8> %shuffle
+}
+
+define <8 x i8> @shuffle_v8i8_as_i64_24(<8 x i8> %v) {
+; CHECK-LABEL: shuffle_v8i8_as_i64_24:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 3
+; CHECK-NEXT:    vslideup.vi v9, v8, 5
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+;
+; ZVBB-V-LABEL: shuffle_v8i8_as_i64_24:
+; ZVBB-V:       # %bb.0:
+; ZVBB-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; ZVBB-V-NEXT:    vror.vi v8, v8, 24
+; ZVBB-V-NEXT:    ret
+;
+; ZVBB-ZVE32X-LABEL: shuffle_v8i8_as_i64_24:
+; ZVBB-ZVE32X:       # %bb.0:
+; ZVBB-ZVE32X-NEXT:    vsetivli zero, 8, e8, m2, ta, ma
+; ZVBB-ZVE32X-NEXT:    vslidedown.vi v10, v8, 3
+; ZVBB-ZVE32X-NEXT:    vslideup.vi v10, v8, 5
+; ZVBB-ZVE32X-NEXT:    vmv.v.v v8, v10
+; ZVBB-ZVE32X-NEXT:    ret
+  %shuffle = shufflevector <8 x i8> %v, <8 x i8> poison, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2>
+  ret <8 x i8> %shuffle
+}
+
+define <8 x i8> @shuffle_v8i8_as_i64_32(<8 x i8> %v) {
+; CHECK-LABEL: shuffle_v8i8_as_i64_32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 4
+; CHECK-NEXT:    vslideup.vi v9, v8, 4
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+;
+; ZVBB-V-LABEL: shuffle_v8i8_as_i64_32:
+; ZVBB-V:       # %bb.0:
+; ZVBB-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; ZVBB-V-NEXT:    vror.vi v8, v8, 32
+; ZVBB-V-NEXT:    ret
+;
+; ZVBB-ZVE32X-LABEL: shuffle_v8i8_as_i64_32:
+; ZVBB-ZVE32X:       # %bb.0:
+; ZVBB-ZVE32X-NEXT:    vsetivli zero, 8, e8, m2, ta, ma
+; ZVBB-ZVE32X-NEXT:    vslidedown.vi v10, v8, 4
+; ZVBB-ZVE32X-NEXT:    vslideup.vi v10, v8, 4
+; ZVBB-ZVE32X-NEXT:    vmv.v.v v8, v10
+; ZVBB-ZVE32X-NEXT:    ret
+  %shuffle = shufflevector <8 x i8> %v, <8 x i8> poison, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3>
+  ret <8 x i8> %shuffle
+}
+
+define <8 x i8> @shuffle_v8i8_as_i64_40(<8 x i8> %v) {
+; CHECK-LABEL: shuffle_v8i8_as_i64_40:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 5
+; CHECK-NEXT:    vslideup.vi v9, v8, 3
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+;
+; ZVBB-V-LABEL: shuffle_v8i8_as_i64_40:
+; ZVBB-V:       # %bb.0:
+; ZVBB-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; ZVBB-V-NEXT:    vror.vi v8, v8, 40
+; ZVBB-V-NEXT:    ret
+;
+; ZVBB-ZVE32X-LABEL: shuffle_v8i8_as_i64_40:
+; ZVBB-ZVE32X:       # %bb.0:
+; ZVBB-ZVE32X-NEXT:    vsetivli zero, 8, e8, m2, ta, ma
+; ZVBB-ZVE32X-NEXT:    vslidedown.vi v10, v8, 5
+; ZVBB-ZVE32X-NEXT:    vslideup.vi v10, v8, 3
+; ZVBB-ZVE32X-NEXT:    vmv.v.v v8, v10
+; ZVBB-ZVE32X-NEXT:    ret
+  %shuffle = shufflevector <8 x i8> %v, <8 x i8> poison, <8 x i32> <i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4>
+  ret <8 x i8> %shuffle
+}
+
+define <8 x i8> @shuffle_v8i8_as_i64_48(<8 x i8> %v) {
+; CHECK-LABEL: shuffle_v8i8_as_i64_48:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 6
+; CHECK-NEXT:    vslideup.vi v9, v8, 2
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+;
+; ZVBB-V-LABEL: shuffle_v8i8_as_i64_48:
+; ZVBB-V:       # %bb.0:
+; ZVBB-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; ZVBB-V-NEXT:    vror.vi v8, v8, 48
+; ZVBB-V-NEXT:    ret
+;
+; ZVBB-ZVE32X-LABEL: shuffle_v8i8_as_i64_48:
+; ZVBB-ZVE32X:       # %bb.0:
+; ZVBB-ZVE32X-NEXT:    vsetivli zero, 8, e8, m2, ta, ma
+; ZVBB-ZVE32X-NEXT:    vslidedown.vi v10, v8, 6
+; ZVBB-ZVE32X-NEXT:    vslideup.vi v10, v8, 2
+; ZVBB-ZVE32X-NEXT:    vmv.v.v v8, v10
+; ZVBB-ZVE32X-NEXT:    ret
+  %shuffle = shufflevector <8 x i8> %v, <8 x i8> poison, <8 x i32> <i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5>
+  ret <8 x i8> %shuffle
+}
+
+define <8 x i8> @shuffle_v8i8_as_i64_56(<8 x i8> %v) {
+; CHECK-LABEL: shuffle_v8i8_as_i64_56:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 7
+; CHECK-NEXT:    vslideup.vi v9, v8, 1
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+;
+; ZVBB-V-LABEL: shuffle_v8i8_as_i64_56:
+; ZVBB-V:       # %bb.0:
+; ZVBB-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; ZVBB-V-NEXT:    vror.vi v8, v8, 56
+; ZVBB-V-NEXT:    ret
+;
+; ZVBB-ZVE32X-LABEL: shuffle_v8i8_as_i64_56:
+; ZVBB-ZVE32X:       # %bb.0:
+; ZVBB-ZVE32X-NEXT:    vsetivli zero, 8, e8, m2, ta, ma
+; ZVBB-ZVE32X-NEXT:    vslidedown.vi v10, v8, 7
+; ZVBB-ZVE32X-NEXT:    vslideup.vi v10, v8, 1
+; ZVBB-ZVE32X-NEXT:    vmv.v.v v8, v10
+; ZVBB-ZVE32X-NEXT:    ret
+  %shuffle = shufflevector <8 x i8> %v, <8 x i8> poison, <8 x i32> <i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6>
+  ret <8 x i8> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_as_i32(<8 x i16> %v) {
+; CHECK-LABEL: shuffle_v8i16_as_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI18_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI18_0)
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vle16.v v10, (a0)
+; CHECK-NEXT:    vrgather.vv v9, v8, v10
+; CHECK-NEXT:    vmv.v.v v8, v9
+; CHECK-NEXT:    ret
+;
+; ZVBB-V-LABEL: shuffle_v8i16_as_i32:
+; ZVBB-V:       # %bb.0:
+; ZVBB-V-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; ZVBB-V-NEXT:    vror.vi v8, v8, 16
+; ZVBB-V-NEXT:    ret
+;
+; ZVBB-ZVE32X-LABEL: shuffle_v8i16_as_i32:
+; ZVBB-ZVE32X:       # %bb.0:
+; ZVBB-ZVE32X-NEXT:    vsetivli zero, 4, e32, m4, ta, ma
+; ZVBB-ZVE32X-NEXT:    vror.vi v8, v8, 16
+; ZVBB-ZVE32X-NEXT:    ret
+  %shuffle = shufflevector <8 x i16> %v, <8 x i16> poison, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
+  ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_as_i64_16(<8 x i16> %v) {
+; CHECK-LABEL: shuffle_v8i16_as_i64_16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI19_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI19_0)
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vle16.v v10, (a0)
+; CHECK-NEXT:    vrgather.vv v9, v8, v10
+; CHECK-NEXT:    vmv.v.v v8, v9
+; CHECK-NEXT:    ret
+;
+; ZVBB-V-LABEL: shuffle_v8i16_as_i64_16:
+; ZVBB-V:       # %bb.0:
+; ZVBB-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; ZVBB-V-NEXT:    vror.vi v8, v8, 16
+; ZVBB-V-NEXT:    ret
+;
+; ZVBB-ZVE32X-LABEL: shuffle_v8i16_as_i64_16:
+; ZVBB-ZVE32X:       # %bb.0:
+; ZVBB-ZVE32X-NEXT:    lui a0, %hi(.LCPI19_0)
+; ZVBB-ZVE32X-NEXT:    addi a0, a0, %lo(.LCPI19_0)
+; ZVBB-ZVE32X-NEXT:    vsetivli zero, 8, e16, m4, ta, ma
+; ZVBB-ZVE32X-NEXT:    vle16.v v16, (a0)
+; ZVBB-ZVE32X-NEXT:    vrgather.vv v12, v8, v16
+; ZVBB-ZVE32X-NEXT:    vmv.v.v v8, v12
+; ZVBB-ZVE32X-NEXT:    ret
+  %shuffle = shufflevector <8 x i16> %v, <8 x i16> poison, <8 x i32> <i32 1, i32 2, i32 3, i32 0, i32 5, i32 6, i32 7, i32 4>
+  ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_as_i64_32(<8 x i16> %v) {
+; CHECK-LABEL: shuffle_v8i16_as_i64_32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI20_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI20_0)
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vle16.v v10, (a0)
+; CHECK-NEXT:    vrgather.vv v9, v8, v10
+; CHECK-NEXT:    vmv.v.v v8, v9
+; CHECK-NEXT:    ret
+;
+; ZVBB-V-LABEL: shuffle_v8i16_as_i64_32:
+; ZVBB-V:       # %bb.0:
+; ZVBB-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; ZVBB-V-NEXT:    vror.vi v8, v8, 32
+; ZVBB-V-NEXT:    ret
+;
+; ZVBB-ZVE32X-LABEL: shuffle_v8i16_as_i64_32:
+; ZVBB-ZVE32X:       # %bb.0:
+; ZVBB-ZVE32X-NEXT:    lui a0, %hi(.LCPI20_0)
+; ZVBB-ZVE32X-NEXT:    addi a0, a0, %lo(.LCPI20_0)
+; ZVBB-ZVE32X-NEXT:    vsetivli zero, 8, e16, m4, ta, ma
+; ZVBB-ZVE32X-NEXT:    vle16.v v16, (a0)
+; ZVBB-ZVE32X-NEXT:    vrgather.vv v12, v8, v16
+; ZVBB-ZVE32X-NEXT:    vmv.v.v v8, v12
+; ZVBB-ZVE32X-NEXT:    ret
+  %shuffle = shufflevector <8 x i16> %v, <8 x i16> poison, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 6, i32 7, i32 4, i32 5>
+  ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_as_i64_48(<8 x i16> %v) {
+; CHECK-LABEL: shuffle_v8i16_as_i64_48:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI21_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI21_0)
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vle16.v v10, (a0)
+; CHECK-NEXT:    vrgather.vv v9, v8, v10
+; CHECK-NEXT:    vmv.v.v v8, v9
+; CHECK-NEXT:    ret
+;
+; ZVBB-V-LABEL: shuffle_v8i16_as_i64_48:
+; ZVBB-V:       # %bb.0:
+; ZVBB-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; ZVBB-V-NEXT:    vror.vi v8, v8, 48
+; ZVBB-V-NEXT:    ret
+;
+; ZVBB-ZVE32X-LABEL: shuffle_v8i16_as_i64_48:
+; ZVBB-ZVE32X:       # %bb.0:
+; ZVBB-ZVE32X-NEXT:    lui a0, %hi(.LCPI21_0)
+; ZVBB-ZVE32X-NEXT:    addi a0, a0, %lo(.LCPI21_0)
+; ZVBB-ZVE32X-NEXT:    vsetivli zero, 8, e16, m4, ta, ma
+; ZVBB-ZVE32X-NEXT:    vle16.v v16, (a0)
+; ZVBB-ZVE32X-NEXT:    vrgather.vv v12, v8, v16
+; ZVBB-ZVE32X-NEXT:    vmv.v.v v8, v12
+; ZVBB-ZVE32X-NEXT:    ret
+  %shuffle = shufflevector <8 x i16> %v, <8 x i16> poison, <8 x i32> <i32 3, i32 0, i32 1, i32 2, i32 7, i32 4, i32 5, i32 6>
+  ret <8 x i16> %shuffle
+}
+
+define <8 x i32> @shuffle_v8i32_as_i64(<8 x i32> %v) {
+; CHECK-LABEL: shuffle_v8i32_as_i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI22_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI22_0)
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vle32.v v12, (a0)
+; CHECK-NEXT:    vrgather.vv v10, v8, v12
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+;
+; ZVBB-V-LABEL: shuffle_v8i32_as_i64:
+; ZVBB-V:       # %bb.0:
+; ZVBB-V-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; ZVBB-V-NEXT:    vror.vi v8, v8, 32
+; ZVBB-V-NEXT:    ret
+;
+; ZVBB-ZVE32X-LABEL: shuffle_v8i32_as_i64:
+; ZVBB-ZVE32X:       # %bb.0:
+; ZVBB-ZVE32X-NEXT:    lui a0, %hi(.LCPI22_0)
+; ZVBB-ZVE32X-NEXT:    addi a0, a0, %lo(.LCPI22_0)
+; ZVBB-ZVE32X-NEXT:    vsetivli zero, 8, e32, m8, ta, ma
+; ZVBB-ZVE32X-NEXT:    vle32.v v24, (a0)
+; ZVBB-ZVE32X-NEXT:    vrgather.vv v16, v8, v24
+; ZVBB-ZVE32X-NEXT:    vmv.v.v v8, v16
+; ZVBB-ZVE32X-NEXT:    ret
+  %shuffle = shufflevector <8 x i32> %v, <8 x i32> poison, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
+  ret <8 x i32> %shuffle
+}
+
+define <8 x half> @shuffle_v8f16_as_i32(<8 x half> %v) {
+; CHECK-LABEL: shuffle_v8f16_as_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI23_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI23_0)
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vle16.v v10, (a0)
+; CHECK-NEXT:    vrgather.vv v9, v8, v10
+; CHECK-NEXT:    vmv.v.v v8, v9
+; CHECK-NEXT:    ret
+;
+; ZVBB-V-LABEL: shuffle_v8f16_as_i32:
+; ZVBB-V:       # %bb.0:
+; ZVBB-V-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; ZVBB-V-NEXT:    vror.vi v8, v8, 16
+; ZVBB-V-NEXT:    ret
+;
+; ZVBB-ZVE32X-LABEL: shuffle_v8f16_as_i32:
+; ZVBB-ZVE32X:       # %bb.0:
+; ZVBB-ZVE32X-NEXT:    vsetivli zero, 4, e32, m4, ta, ma
+; ZVBB-ZVE32X-NEXT:    vror.vi v8, v8, 16
+; ZVBB-ZVE32X-NEXT:    ret
+  %shuffle = shufflevector <8 x half> %v, <8 x half> poison, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
+  ret <8 x half> %shuffle
+}
+
+define <8 x half> @shuffle_v8f16_as_i64_16(<8 x half> %v) {
+; CHECK-LABEL: shuffle_v8f16_as_i64_16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI24_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI24_0)
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vle16.v v10, (a0)
+; CHECK-NEXT:    vrgather.vv v9, v8, v10
+; CHECK-NEXT:    vmv.v.v v8, v9
+; CHECK-NEXT:    ret
+;
+; ZVBB-V-LABEL: shuffle_v8f16_as_i64_16:
+; ZVBB-V:       # %bb.0:
+; ZVBB-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; ZVBB-V-NEXT:    vror.vi v8, v8, 16
+; ZVBB-V-NEXT:    ret
+;
+; ZVBB-ZVE32X-LABEL: shuffle_v8f16_as_i64_16:
+; ZVBB-ZVE32X:       # %bb.0:
+; ZVBB-ZVE32X-NEXT:    lui a0, %hi(.LCPI24_0)
+; ZVBB-ZVE32X-NEXT:    addi a0, a0, %lo(.LCPI24_0)
+; ZVBB-ZVE32X-NEXT:    vsetivli zero, 8, e16, m4, ta, ma
+; ZVBB-ZVE32X-NEXT:    vle16.v v16, (a0)
+; ZVBB-ZVE32X-NEXT:    vrgather.vv v12, v8, v16
+; ZVBB-ZVE32X-NEXT:    vmv.v.v v8, v12
+; ZVBB-ZVE32X-NEXT:    ret
+  %shuffle = shufflevector <8 x half> %v, <8 x half> poison, <8 x i32> <i32 1, i32 2, i32 3, i32 0, i32 5, i32 6, i32 7, i32 4>
+  ret <8 x half> %shuffle
+}
+
+define <8 x half> @shuffle_v8f16_as_i64_32(<8 x half> %v) {
+; CHECK-LABEL: shuffle_v8f16_as_i64_32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI25_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI25_0)
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vle16.v v10, (a0)
+; CHECK-NEXT:    vrgather.vv v9, v8, v10
+; CHECK-NEXT:    vmv.v.v v8, v9
+; CHECK-NEXT:    ret
+;
+; ZVBB-V-LABEL: shuffle_v8f16_as_i64_32:
+; ZVBB-V:       # %bb.0:
+; ZVBB-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; ZVBB-V-NEXT:    vror.vi v8, v8, 32
+; ZVBB-V-NEXT:    ret
+;
+; ZVBB-ZVE32X-LABEL: shuffle_v8f16_as_i64_32:
+; ZVBB-ZVE32X:       # %bb.0:
+; ZVBB-ZVE32X-NEXT:    lui a0, %hi(.LCPI25_0)
+; ZVBB-ZVE32X-NEXT:    addi a0, a0, %lo(.LCPI25_0)
+; ZVBB-ZVE32X-NEXT:    vsetivli zero, 8, e16, m4, ta, ma
+; ZVBB-ZVE32X-NEXT:    vle16.v v16, (a0)
+; ZVBB-ZVE32X-NEXT:    vrgather.vv v12, v8, v16
+; ZVBB-ZVE32X-NEXT:    vmv.v.v v8, v12
+; ZVBB-ZVE32X-NEXT:    ret
+  %shuffle = shufflevector <8 x half> %v, <8 x half> poison, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 6, i32 7, i32 4, i32 5>
+  ret <8 x half> %shuffle
+}
+
+define <8 x half> @shuffle_v8f16_as_i64_48(<8 x half> %v) {
+; CHECK-LABEL: shuffle_v8f16_as_i64_48:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI26_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI26_0)
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vle16.v v10, (a0)
+; CHECK-NEXT:    vrgather.vv v9, v8, v10
+; CHECK-NEXT:    vmv.v.v v8, v9
+; CHECK-NEXT:    ret
+;
+; ZVBB-V-LABEL: shuffle_v8f16_as_i64_48:
+; ZVBB-V:       # %bb.0:
+; ZVBB-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; ZVBB-V-NEXT:    vror.vi v8, v8, 48
+; ZVBB-V-NEXT:    ret
+;
+; ZVBB-ZVE32X-LABEL: shuffle_v8f16_as_i64_48:
+; ZVBB-ZVE32X:       # %bb.0:
+; ZVBB-ZVE32X-NEXT:    lui a0, %hi(.LCPI26_0)
+; ZVBB-ZVE32X-NEXT:    addi a0, a0, %lo(.LCPI26_0)
+; ZVBB-ZVE32X-NEXT:    vsetivli zero, 8, e16, m4, ta, ma
+; ZVBB-ZVE32X-NEXT:    vle16.v v16, (a0)
+; ZVBB-ZVE32X-NEXT:    vrgather.vv v12, v8, v16
+; ZVBB-ZVE32X-NEXT:    vmv.v.v v8, v12
+; ZVBB-ZVE32X-NEXT:    ret
+  %shuffle = shufflevector <8 x half> %v, <8 x half> poison, <8 x i32> <i32 3, i32 0, i32 1, i32 2, i32 7, i32 4, i32 5, i32 6>
+  ret <8 x half> %shuffle
+}
+
+define <8 x float> @shuffle_v8f32_as_i64(<8 x float> %v) {
+; CHECK-LABEL: shuffle_v8f32_as_i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI27_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI27_0)
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vle32.v v12, (a0)
+; CHECK-NEXT:    vrgather.vv v10, v8, v12
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+;
+; ZVBB-V-LABEL: shuffle_v8f32_as_i64:
+; ZVBB-V:       # %bb.0:
+; ZVBB-V-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; ZVBB-V-NEXT:    vror.vi v8, v8, 32
+; ZVBB-V-NEXT:    ret
+;
+; ZVBB-ZVE32X-LABEL: shuffle_v8f32_as_i64:
+; ZVBB-ZVE32X:       # %bb.0:
+; ZVBB-ZVE32X-NEXT:    lui a0, %hi(.LCPI27_0)
+; ZVBB-ZVE32X-NEXT:    addi a0, a0, %lo(.LCPI27_0)
+; ZVBB-ZVE32X-NEXT:    vsetivli zero, 8, e32, m8, ta, ma
+; ZVBB-ZVE32X-NEXT:    vle32.v v24, (a0)
+; ZVBB-ZVE32X-NEXT:    vrgather.vv v16, v8, v24
+; ZVBB-ZVE32X-NEXT:    vmv.v.v v8, v16
+; ZVBB-ZVE32X-NEXT:    ret
+  %shuffle = shufflevector <8 x float> %v, <8 x float> poison, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
+  ret <8 x float> %shuffle
+}