[llvm] e80ca3a - [RISCV] Fix INSERT/EXTRACT_SUBVECTOR on fractional LMUL types

Fraser Cormack via llvm-commits llvm-commits at lists.llvm.org
Mon Mar 1 03:57:25 PST 2021


Author: Fraser Cormack
Date: 2021-03-01T11:51:05Z
New Revision: e80ca3af82f8177a1b239bab6bb25d08ec86adeb

URL: https://github.com/llvm/llvm-project/commit/e80ca3af82f8177a1b239bab6bb25d08ec86adeb
DIFF: https://github.com/llvm/llvm-project/commit/e80ca3af82f8177a1b239bab6bb25d08ec86adeb.diff

LOG: [RISCV] Fix INSERT/EXTRACT_SUBVECTOR on fractional LMUL types

This patch fixes a bug where the lowering for INSERT_SUBVECTOR and
EXTRACT_SUBVECTOR would insist on first extracting a register-aligned
LMUL1 vector type before performing the slide up/down. It did so even
when the vector was a fractional LMUL type, in which case the aligned
EXTRACT_SUBVECTOR was invalid.

This issue only occurred for scalable vector types, but tests for both
scalable and fixed-length vectors have been added to guard against
future regressions.
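
As a concrete illustration (mirroring the added tests below; the
function names here are illustrative), <vscale x 4 x i8> occupies only
half a vector register (LMUL=1/2, i.e. mf2), so there is no wider
register-aligned LMUL1 container to extract before sliding:

  ; Extract at a non-zero index from a fractional LMUL (mf2) type.
  define <vscale x 1 x i8> @extract_frac(<vscale x 4 x i8> %vec) {
    %c = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv4i8(<vscale x 4 x i8> %vec, i64 3)
    ret <vscale x 1 x i8> %c
  }

  ; Insert at a non-zero index into a fractional LMUL (mf2) type.
  define <vscale x 4 x i8> @insert_frac(<vscale x 4 x i8> %vec, <vscale x 1 x i8> %sub) {
    %v = call <vscale x 4 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv4i8(<vscale x 4 x i8> %vec, <vscale x 1 x i8> %sub, i64 3)
    ret <vscale x 4 x i8> %v
  }

  declare <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv4i8(<vscale x 4 x i8>, i64)
  declare <vscale x 4 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv4i8(<vscale x 4 x i8>, <vscale x 1 x i8>, i64)

With this patch both now lower to a single vslidedown/vslideup at mf2
rather than first forming an invalid LMUL1-aligned extract.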

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D97556

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
    llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index b3bacef0b083..0800e5345adb 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -2504,14 +2504,16 @@ SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
   // (in our case undisturbed). This means we can set up a subvector insertion
   // where OFFSET is the insertion offset, and the VL is the OFFSET plus the
   // size of the subvector.
-  MVT InterSubVT = getLMUL1VT(VecVT);
-
-  // Extract a subvector equal to the nearest full vector register type. This
-  // should resolve to a EXTRACT_SUBREG instruction.
+  MVT InterSubVT = VecVT;
+  SDValue AlignedExtract = Vec;
   unsigned AlignedIdx = OrigIdx - RemIdx;
-  SDValue AlignedExtract =
-      DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
-                  DAG.getConstant(AlignedIdx, DL, XLenVT));
+  if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
+    InterSubVT = getLMUL1VT(VecVT);
+    // Extract a subvector equal to the nearest full vector register type. This
+    // should resolve to a EXTRACT_SUBREG instruction.
+    AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
+                                 DAG.getConstant(AlignedIdx, DL, XLenVT));
+  }
 
   SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT);
   // For scalable vectors this must be further multiplied by vscale.
@@ -2532,10 +2534,12 @@ SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT,
                                 AlignedExtract, SubVec, SlideupAmt, Mask, VL);
 
-  // Insert this subvector into the correct vector register. This should
-  // resolve to an INSERT_SUBREG instruction.
-  return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup,
-                     DAG.getConstant(AlignedIdx, DL, XLenVT));
+  // If required, insert this subvector back into the correct vector register.
+  // This should resolve to an INSERT_SUBREG instruction.
+  if (VecVT.bitsGT(InterSubVT))
+    Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup,
+                          DAG.getConstant(AlignedIdx, DL, XLenVT));
+  return Slideup;
 }
 
 SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
@@ -2630,13 +2634,15 @@ SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
   // Else we must shift our vector register directly to extract the subvector.
   // Do this using VSLIDEDOWN.
 
-  // Extract a subvector equal to the nearest full vector register type. This
-  // should resolve to a EXTRACT_SUBREG instruction.
-  unsigned AlignedIdx = OrigIdx - RemIdx;
-  MVT InterSubVT = getLMUL1VT(VecVT);
-  SDValue AlignedExtract =
-      DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
-                  DAG.getConstant(AlignedIdx, DL, XLenVT));
+  // If the vector type is an LMUL-group type, extract a subvector equal to the
+  // nearest full vector register type. This should resolve to a EXTRACT_SUBREG
+  // instruction.
+  MVT InterSubVT = VecVT;
+  if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
+    InterSubVT = getLMUL1VT(VecVT);
+    Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
+                      DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT));
+  }
 
   // Slide this vector register down by the desired number of elements in order
   // to place the desired subvector starting at element 0.
@@ -2646,9 +2652,9 @@ SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
 
   SDValue Mask, VL;
   std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
-  SDValue Slidedown = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT,
-                                  DAG.getUNDEF(InterSubVT), AlignedExtract,
-                                  SlidedownAmt, Mask, VL);
+  SDValue Slidedown =
+      DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT,
+                  DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL);
 
   // Now the vector is in the right position, extract our final subvector. This
   // should resolve to a COPY.

diff --git a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
index 3f264792e1ac..96cc00a3f1a4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
@@ -328,6 +328,20 @@ define <vscale x 1 x i8> @extract_nxv8i8_nxv1i8_7(<vscale x 8 x i8> %vec) {
   ret <vscale x 1 x i8> %c
 }
 
+define <vscale x 1 x i8> @extract_nxv4i8_nxv1i8_3(<vscale x 4 x i8> %vec) {
+; CHECK-LABEL: extract_nxv4i8_nxv1i8_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    srli a0, a0, 3
+; CHECK-NEXT:    slli a1, a0, 1
+; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vslidedown.vx v8, v8, a0
+; CHECK-NEXT:    ret
+  %c = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv4i8(<vscale x 4 x i8> %vec, i64 3)
+  ret <vscale x 1 x i8> %c
+}
+
 define <vscale x 2 x half> @extract_nxv2f16_nxv16f16_0(<vscale x 16 x half> %vec) {
 ; CHECK-LABEL: extract_nxv2f16_nxv16f16_0:
 ; CHECK:       # %bb.0:
@@ -403,6 +417,52 @@ define <vscale x 2 x i1> @extract_nxv64i1_nxv2i1_2(<vscale x 64 x i1> %mask) {
   ret <vscale x 2 x i1> %c
 }
 
+define <vscale x 4 x i1> @extract_nxv4i1_nxv32i1_0(<vscale x 32 x i1> %x) {
+; CHECK-LABEL: extract_nxv4i1_nxv32i1_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret
+  %c = call <vscale x 4 x i1> @llvm.experimental.vector.extract.nxv4i1(<vscale x 32 x i1> %x, i64 0)
+  ret <vscale x 4 x i1> %c
+}
+
+define <vscale x 4 x i1> @extract_nxv4i1_nxv32i1_4(<vscale x 32 x i1> %x) {
+; CHECK-LABEL: extract_nxv4i1_nxv32i1_4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vmv.v.i v28, 0
+; CHECK-NEXT:    vmerge.vim v28, v28, 1, v0
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    srli a0, a0, 1
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vslidedown.vx v25, v28, a0
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %c = call <vscale x 4 x i1> @llvm.experimental.vector.extract.nxv4i1(<vscale x 32 x i1> %x, i64 4)
+  ret <vscale x 4 x i1> %c
+}
+
+define <vscale x 16 x i1> @extract_nxv16i1_nxv32i1_0(<vscale x 32 x i1> %x) {
+; CHECK-LABEL: extract_nxv16i1_nxv32i1_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret
+  %c = call <vscale x 16 x i1> @llvm.experimental.vector.extract.nxv16i1(<vscale x 32 x i1> %x, i64 0)
+  ret <vscale x 16 x i1> %c
+}
+
+define <vscale x 16 x i1> @extract_nxv16i1_nxv32i1_16(<vscale x 32 x i1> %x) {
+; CHECK-LABEL: extract_nxv16i1_nxv32i1_16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    srli a0, a0, 2
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vslidedown.vx v0, v0, a0
+; CHECK-NEXT:    ret
+  %c = call <vscale x 16 x i1> @llvm.experimental.vector.extract.nxv16i1(<vscale x 32 x i1> %x, i64 16)
+  ret <vscale x 16 x i1> %c
+}
+
+declare <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv4i8(<vscale x 4 x i8> %vec, i64 %idx)
 declare <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv8i8(<vscale x 8 x i8> %vec, i64 %idx)
 
 declare <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 %idx)
@@ -419,5 +479,8 @@ declare <vscale x 8 x i32> @llvm.experimental.vector.extract.nxv8i32.nxv16i32(<v
 
 declare <vscale x 2 x half> @llvm.experimental.vector.extract.nxv2f16.nxv16f16(<vscale x 16 x half> %vec, i64 %idx)
 
+declare <vscale x 4 x i1> @llvm.experimental.vector.extract.nxv4i1(<vscale x 32 x i1> %vec, i64 %idx)
+declare <vscale x 16 x i1> @llvm.experimental.vector.extract.nxv16i1(<vscale x 32 x i1> %vec, i64 %idx)
+
 declare <vscale x 2 x i1> @llvm.experimental.vector.extract.nxv2i1(<vscale x 64 x i1> %vec, i64 %idx)
 declare <vscale x 8 x i1> @llvm.experimental.vector.extract.nxv8i1(<vscale x 64 x i1> %vec, i64 %idx)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll
index 8618598938f7..df520196d7e2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll
@@ -2,6 +2,35 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+m,+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX2
 ; RUN: llc -mtriple=riscv64 -mattr=+m,+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1
 
+define void @extract_v2i8_v4i8_0(<4 x i8>* %x, <2 x i8>* %y) {
+; CHECK-LABEL: extract_v2i8_v4i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a2, 4, e8,m1,ta,mu
+; CHECK-NEXT:    vle8.v v25, (a0)
+; CHECK-NEXT:    vsetivli a0, 2, e8,m1,ta,mu
+; CHECK-NEXT:    vse8.v v25, (a1)
+; CHECK-NEXT:    ret
+  %a = load <4 x i8>, <4 x i8>* %x
+  %c = call <2 x i8> @llvm.experimental.vector.extract.v2i8.v4i8(<4 x i8> %a, i64 0)
+  store <2 x i8> %c, <2 x i8>* %y
+  ret void
+}
+
+define void @extract_v2i8_v4i8_2(<4 x i8>* %x, <2 x i8>* %y) {
+; CHECK-LABEL: extract_v2i8_v4i8_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a2, 4, e8,m1,ta,mu
+; CHECK-NEXT:    vle8.v v25, (a0)
+; CHECK-NEXT:    vsetivli a0, 2, e8,m1,ta,mu
+; CHECK-NEXT:    vslidedown.vi v25, v25, 2
+; CHECK-NEXT:    vse8.v v25, (a1)
+; CHECK-NEXT:    ret
+  %a = load <4 x i8>, <4 x i8>* %x
+  %c = call <2 x i8> @llvm.experimental.vector.extract.v2i8.v4i8(<4 x i8> %a, i64 2)
+  store <2 x i8> %c, <2 x i8>* %y
+  ret void
+}
+
 define void @extract_v2i8_v8i8_0(<8 x i8>* %x, <2 x i8>* %y) {
 ; CHECK-LABEL: extract_v2i8_v8i8_0:
 ; CHECK:       # %bb.0:
@@ -128,6 +157,30 @@ define void @extract_v2i32_nxv16i32_8(<vscale x 16 x i32> %x, <2 x i32>* %y) {
   ret void
 }
 
+define void @extract_v2i8_nxv2i8_0(<vscale x 2 x i8> %x, <2 x i8>* %y) {
+; CHECK-LABEL: extract_v2i8_nxv2i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a1, 2, e8,m1,ta,mu
+; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    ret
+  %c = call <2 x i8> @llvm.experimental.vector.extract.v2i8.nxv2i8(<vscale x 2 x i8> %x, i64 0)
+  store <2 x i8> %c, <2 x i8>* %y
+  ret void
+}
+
+define void @extract_v2i8_nxv2i8_2(<vscale x 2 x i8> %x, <2 x i8>* %y) {
+; CHECK-LABEL: extract_v2i8_nxv2i8_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a1, 2, e8,mf4,ta,mu
+; CHECK-NEXT:    vslidedown.vi v25, v8, 2
+; CHECK-NEXT:    vsetivli a1, 2, e8,m1,ta,mu
+; CHECK-NEXT:    vse8.v v25, (a0)
+; CHECK-NEXT:    ret
+  %c = call <2 x i8> @llvm.experimental.vector.extract.v2i8.nxv2i8(<vscale x 2 x i8> %x, i64 2)
+  store <2 x i8> %c, <2 x i8>* %y
+  ret void
+}
+
 define void @extract_v8i32_nxv16i32_8(<vscale x 16 x i32> %x, <8 x i32>* %y) {
 ; LMULMAX2-LABEL: extract_v8i32_nxv16i32_8:
 ; LMULMAX2:       # %bb.0:
@@ -458,17 +511,53 @@ define void @extract_v2i1_nxv64i1_42(<vscale x 64 x i1> %x, <2 x i1>* %y) {
   ret void
 }
 
+define void @extract_v2i1_nxv32i1_26(<vscale x 32 x i1> %x, <2 x i1>* %y) {
+; CHECK-LABEL: extract_v2i1_nxv32i1_26:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vmv.v.i v28, 0
+; CHECK-NEXT:    vmerge.vim v28, v28, 1, v0
+; CHECK-NEXT:    vsetivli a1, 2, e8,m4,ta,mu
+; CHECK-NEXT:    vslidedown.vi v28, v28, 26
+; CHECK-NEXT:    vsetivli a1, 2, e8,m1,ta,mu
+; CHECK-NEXT:    vmsne.vi v25, v28, 0
+; CHECK-NEXT:    vse1.v v25, (a0)
+; CHECK-NEXT:    ret
+  %c = call <2 x i1> @llvm.experimental.vector.extract.v2i1.nxv32i1(<vscale x 32 x i1> %x, i64 26)
+  store <2 x i1> %c, <2 x i1>* %y
+  ret void
+}
+
+define void @extract_v8i1_nxv32i1_16(<vscale x 32 x i1> %x, <8 x i1>* %y) {
+; CHECK-LABEL: extract_v8i1_nxv32i1_16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a1, 1, e8,mf2,ta,mu
+; CHECK-NEXT:    vslidedown.vi v25, v0, 2
+; CHECK-NEXT:    vsetivli a1, 8, e8,m1,ta,mu
+; CHECK-NEXT:    vse1.v v25, (a0)
+; CHECK-NEXT:    ret
+  %c = call <8 x i1> @llvm.experimental.vector.extract.v8i1.nxv32i1(<vscale x 32 x i1> %x, i64 16)
+  store <8 x i1> %c, <8 x i1>* %y
+  ret void
+}
+
 declare <2 x i1> @llvm.experimental.vector.extract.v2i1.v64i1(<64 x i1> %vec, i64 %idx)
 declare <8 x i1> @llvm.experimental.vector.extract.v8i1.v64i1(<64 x i1> %vec, i64 %idx)
 
 declare <2 x i1> @llvm.experimental.vector.extract.v2i1.nxv2i1(<vscale x 2 x i1> %vec, i64 %idx)
 declare <8 x i1> @llvm.experimental.vector.extract.v8i1.nxv2i1(<vscale x 2 x i1> %vec, i64 %idx)
 
+declare <2 x i1> @llvm.experimental.vector.extract.v2i1.nxv32i1(<vscale x 32 x i1> %vec, i64 %idx)
+declare <8 x i1> @llvm.experimental.vector.extract.v8i1.nxv32i1(<vscale x 32 x i1> %vec, i64 %idx)
+
 declare <2 x i1> @llvm.experimental.vector.extract.v2i1.nxv64i1(<vscale x 64 x i1> %vec, i64 %idx)
 declare <8 x i1> @llvm.experimental.vector.extract.v8i1.nxv64i1(<vscale x 64 x i1> %vec, i64 %idx)
 
+declare <2 x i8> @llvm.experimental.vector.extract.v2i8.v4i8(<4 x i8> %vec, i64 %idx)
 declare <2 x i8> @llvm.experimental.vector.extract.v2i8.v8i8(<8 x i8> %vec, i64 %idx)
 declare <2 x i32> @llvm.experimental.vector.extract.v2i32.v8i32(<8 x i32> %vec, i64 %idx)
 
+declare <2 x i8> @llvm.experimental.vector.extract.v2i8.nxv2i8(<vscale x 2 x i8> %vec, i64 %idx)
+
 declare <2 x i32> @llvm.experimental.vector.extract.v2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 %idx)
 declare <8 x i32> @llvm.experimental.vector.extract.v8i32.nxv16i32(<vscale x 16 x i32> %vec, i64 %idx)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
index aa43d6006bb4..1638ffa3f82a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
@@ -323,9 +323,77 @@ define void @insert_v8i32_undef_v2i32_6(<8 x i32>* %vp, <2 x i32>* %svp) {
   ret void
 }
 
+define void @insert_v4i16_v2i16_0(<4 x i16>* %vp, <2 x i16>* %svp) {
+; CHECK-LABEL: insert_v4i16_v2i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a2, 4, e16,m1,ta,mu
+; CHECK-NEXT:    vle16.v v25, (a0)
+; CHECK-NEXT:    vsetivli a2, 2, e16,m1,ta,mu
+; CHECK-NEXT:    vle16.v v26, (a1)
+; CHECK-NEXT:    vsetivli a1, 2, e16,m1,tu,mu
+; CHECK-NEXT:    vslideup.vi v25, v26, 0
+; CHECK-NEXT:    vsetivli a1, 4, e16,m1,ta,mu
+; CHECK-NEXT:    vse16.v v25, (a0)
+; CHECK-NEXT:    ret
+  %v = load <4 x i16>, <4 x i16>* %vp
+  %sv = load <2 x i16>, <2 x i16>* %svp
+  %c = call <4 x i16> @llvm.experimental.vector.insert.v2i16.v4i16(<4 x i16> %v, <2 x i16> %sv, i64 0)
+  store <4 x i16> %c, <4 x i16>* %vp
+  ret void
+}
+
+define void @insert_v4i16_v2i16_2(<4 x i16>* %vp, <2 x i16>* %svp) {
+; CHECK-LABEL: insert_v4i16_v2i16_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a2, 4, e16,m1,ta,mu
+; CHECK-NEXT:    vle16.v v25, (a0)
+; CHECK-NEXT:    vsetivli a2, 2, e16,m1,ta,mu
+; CHECK-NEXT:    vle16.v v26, (a1)
+; CHECK-NEXT:    vsetivli a1, 4, e16,m1,tu,mu
+; CHECK-NEXT:    vslideup.vi v25, v26, 2
+; CHECK-NEXT:    vsetivli a1, 4, e16,m1,ta,mu
+; CHECK-NEXT:    vse16.v v25, (a0)
+; CHECK-NEXT:    ret
+  %v = load <4 x i16>, <4 x i16>* %vp
+  %sv = load <2 x i16>, <2 x i16>* %svp
+  %c = call <4 x i16> @llvm.experimental.vector.insert.v2i16.v4i16(<4 x i16> %v, <2 x i16> %sv, i64 2)
+  store <4 x i16> %c, <4 x i16>* %vp
+  ret void
+}
+
+define <vscale x 2 x i16> @insert_nxv2i16_v2i16_0(<vscale x 2 x i16> %v, <2 x i16>* %svp) {
+; CHECK-LABEL: insert_nxv2i16_v2i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a1, 2, e16,m1,ta,mu
+; CHECK-NEXT:    vle16.v v25, (a0)
+; CHECK-NEXT:    vsetivli a0, 2, e16,mf2,tu,mu
+; CHECK-NEXT:    vslideup.vi v8, v25, 0
+; CHECK-NEXT:    ret
+  %sv = load <2 x i16>, <2 x i16>* %svp
+  %c = call <vscale x 2 x i16> @llvm.experimental.vector.insert.v2i16.nxv2i16(<vscale x 2 x i16> %v, <2 x i16> %sv, i64 0)
+  ret <vscale x 2 x i16> %c
+}
+
+define <vscale x 2 x i16> @insert_nxv2i16_v2i16_2(<vscale x 2 x i16> %v, <2 x i16>* %svp) {
+; CHECK-LABEL: insert_nxv2i16_v2i16_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a1, 2, e16,m1,ta,mu
+; CHECK-NEXT:    vle16.v v25, (a0)
+; CHECK-NEXT:    vsetivli a0, 6, e16,mf2,tu,mu
+; CHECK-NEXT:    vslideup.vi v8, v25, 4
+; CHECK-NEXT:    ret
+  %sv = load <2 x i16>, <2 x i16>* %svp
+  %c = call <vscale x 2 x i16> @llvm.experimental.vector.insert.v2i16.nxv2i16(<vscale x 2 x i16> %v, <2 x i16> %sv, i64 4)
+  ret <vscale x 2 x i16> %c
+}
+
+declare <4 x i16> @llvm.experimental.vector.insert.v2i16.v4i16(<4 x i16>, <2 x i16>, i64)
+
 declare <4 x i32> @llvm.experimental.vector.insert.v2i32.v4i32(<4 x i32>, <2 x i32>, i64)
 declare <8 x i32> @llvm.experimental.vector.insert.v2i32.v8i32(<8 x i32>, <2 x i32>, i64)
 
+declare <vscale x 2 x i16> @llvm.experimental.vector.insert.v2i16.nxv2i16(<vscale x 2 x i16>, <2 x i16>, i64)
+
 declare <vscale x 8 x i32> @llvm.experimental.vector.insert.v2i32.nxv8i32(<vscale x 8 x i32>, <2 x i32>, i64)
 declare <vscale x 8 x i32> @llvm.experimental.vector.insert.v4i32.nxv8i32(<vscale x 8 x i32>, <4 x i32>, i64)
 declare <vscale x 8 x i32> @llvm.experimental.vector.insert.v8i32.nxv8i32(<vscale x 8 x i32>, <8 x i32>, i64)

diff --git a/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll
index cb2e875d797d..b8cb530c211f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll
@@ -55,6 +55,33 @@ define <vscale x 8 x i32> @insert_nxv8i32_nxv2i32_6(<vscale x 8 x i32> %vec, <vs
   ret <vscale x 8 x i32> %v
 }
 
+define <vscale x 4 x i8> @insert_nxv1i8_nxv4i8_0(<vscale x 4 x i8> %vec, <vscale x 1 x i8> %subvec) {
+; CHECK-LABEL: insert_nxv1i8_nxv4i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    srli a0, a0, 3
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vslideup.vi v8, v9, 0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 4 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv4i8(<vscale x 4 x i8> %vec, <vscale x 1 x i8> %subvec, i64 0)
+  ret <vscale x 4 x i8> %v
+}
+
+define <vscale x 4 x i8> @insert_nxv1i8_nxv4i8_3(<vscale x 4 x i8> %vec, <vscale x 1 x i8> %subvec) {
+; CHECK-LABEL: insert_nxv1i8_nxv4i8_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    srli a0, a0, 3
+; CHECK-NEXT:    slli a1, a0, 1
+; CHECK-NEXT:    add a1, a1, a0
+; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vslideup.vx v8, v9, a1
+; CHECK-NEXT:    ret
+  %v = call <vscale x 4 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv4i8(<vscale x 4 x i8> %vec, <vscale x 1 x i8> %subvec, i64 3)
+  ret <vscale x 4 x i8> %v
+}
+
 define <vscale x 16 x i32> @insert_nxv16i32_nxv8i32_0(<vscale x 16 x i32> %vec, <vscale x 8 x i32> %subvec) {
 ; CHECK-LABEL: insert_nxv16i32_nxv8i32_0:
 ; CHECK:       # %bb.0:
@@ -367,6 +394,8 @@ declare <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv16i8(<vsca
 declare <vscale x 32 x half> @llvm.experimental.vector.insert.nxv1f16.nxv32f16(<vscale x 32 x half>, <vscale x 1 x half>, i64)
 declare <vscale x 32 x half> @llvm.experimental.vector.insert.nxv2f16.nxv32f16(<vscale x 32 x half>, <vscale x 2 x half>, i64)
 
+declare <vscale x 4 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv4i8(<vscale x 4 x i8>, <vscale x 1 x i8>, i64 %idx)
+
 declare <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv2i32.nxv8i32(<vscale x 8 x i32>, <vscale x 2 x i32>, i64 %idx)
 declare <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv8i32(<vscale x 8 x i32>, <vscale x 4 x i32>, i64 %idx)
 
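To reproduce the new codegen locally, the RUN lines at the top of the
modified tests can be invoked directly; for example, the fixed-length
LMULMAX2 configuration shown in fixed-vectors-extract-subvector.ll:

  llc -mtriple=riscv64 -mattr=+m,+experimental-v -verify-machineinstrs \
    -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=2 \
    < llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll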

