[llvm] ca2c245 - [RISCV] Support INSERT_VECTOR_ELT into i1 vectors

Fraser Cormack via llvm-commits llvm-commits at lists.llvm.org
Wed May 19 01:49:42 PDT 2021


Author: Fraser Cormack
Date: 2021-05-19T09:41:50+01:00
New Revision: ca2c245ba4665bde94f8c6319185d1c9ff295167

URL: https://github.com/llvm/llvm-project/commit/ca2c245ba4665bde94f8c6319185d1c9ff295167
DIFF: https://github.com/llvm/llvm-project/commit/ca2c245ba4665bde94f8c6319185d1c9ff295167.diff

LOG: [RISCV] Support INSERT_VECTOR_ELT into i1 vectors

As with element extraction from these vectors, we promote to an i8
vector type, perform the insertion there, and truncate the result back
to i1.
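
For illustration only (this IR is not part of the patch, and the
function and value names are hypothetical), the chosen lowering is
roughly equivalent to widening the mask vector, inserting, and
narrowing back:

  define <4 x i1> @insert_example(<4 x i1> %v, i1 %elt) {
    ; Widen the i1 mask vector to an i8 vector.
    %w = zext <4 x i1> %v to <4 x i8>
    ; Widen the scalar; in IR the scalar must match the element type.
    %e = zext i1 %elt to i8
    ; Insert into the widened vector.
    %ins = insertelement <4 x i8> %w, i8 %e, i64 2
    ; Narrow back to i1; only the low bit of each lane is significant.
    %r = trunc <4 x i8> %ins to <4 x i1>
    ret <4 x i1> %r
  }

Since only the low bit of each widened lane matters, the generated code
masks with vand.vi before comparing with vmsne.vi, as seen in the tests
below.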

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D102697

Added: 
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll
    llvm/test/CodeGen/RISCV/rvv/insertelt-i1.ll

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 2f7c0806e1d64..1b34432d96fd7 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -440,6 +440,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
 
+      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
 
       setOperationAction(ISD::SELECT, VT, Expand);
@@ -636,6 +637,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
         setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
 
+        setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
 
         setOperationAction(ISD::LOAD, VT, Custom);
@@ -3092,6 +3094,15 @@ SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
   SDValue Val = Op.getOperand(1);
   SDValue Idx = Op.getOperand(2);
 
+  if (VecVT.getVectorElementType() == MVT::i1) {
+    // FIXME: For now we just promote to an i8 vector and insert into that,
+    // but this is probably not optimal.
+    MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
+    Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
+    Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideVT, Vec, Val, Idx);
+    return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Vec);
+  }
+
   MVT ContainerVT = VecVT;
   // If the operand is a fixed-length vector, convert to a scalable one.
   if (VecVT.isFixedLengthVector()) {

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll
new file mode 100644
index 0000000000000..43e42a230b610
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll
@@ -0,0 +1,205 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64
+
+define <1 x i1> @insertelt_v1i1(<1 x i1> %x, i1 %elt) nounwind {
+; CHECK-LABEL: insertelt_v1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a1, 1, e8,mf8,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v25, v25, 1, v0
+; CHECK-NEXT:    vmv.s.x v25, a0
+; CHECK-NEXT:    vand.vi v25, v25, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %y = insertelement <1 x i1> %x, i1 %elt, i64 0
+  ret <1 x i1> %y
+}
+
+define <1 x i1> @insertelt_idx_v1i1(<1 x i1> %x, i1 %elt, i32 zeroext %idx) nounwind {
+; RV32-LABEL: insertelt_idx_v1i1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli a2, 1, e8,mf8,ta,mu
+; RV32-NEXT:    vmv.s.x v25, a0
+; RV32-NEXT:    vmv.v.i v26, 0
+; RV32-NEXT:    vmerge.vim v26, v26, 1, v0
+; RV32-NEXT:    addi a0, a1, 1
+; RV32-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
+; RV32-NEXT:    vslideup.vx v26, v25, a1
+; RV32-NEXT:    vsetivli a0, 1, e8,mf8,ta,mu
+; RV32-NEXT:    vand.vi v25, v26, 1
+; RV32-NEXT:    vmsne.vi v0, v25, 0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: insertelt_idx_v1i1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli a2, 1, e8,mf8,ta,mu
+; RV64-NEXT:    vmv.s.x v25, a0
+; RV64-NEXT:    vmv.v.i v26, 0
+; RV64-NEXT:    vmerge.vim v26, v26, 1, v0
+; RV64-NEXT:    sext.w a0, a1
+; RV64-NEXT:    addi a1, a0, 1
+; RV64-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; RV64-NEXT:    vslideup.vx v26, v25, a0
+; RV64-NEXT:    vsetivli a0, 1, e8,mf8,ta,mu
+; RV64-NEXT:    vand.vi v25, v26, 1
+; RV64-NEXT:    vmsne.vi v0, v25, 0
+; RV64-NEXT:    ret
+  %y = insertelement <1 x i1> %x, i1 %elt, i32 %idx
+  ret <1 x i1> %y
+}
+
+define <2 x i1> @insertelt_v2i1(<2 x i1> %x, i1 %elt) nounwind {
+; CHECK-LABEL: insertelt_v2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a1, 2, e8,mf8,ta,mu
+; CHECK-NEXT:    vmv.s.x v25, a0
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v26, v26, 1, v0
+; CHECK-NEXT:    vsetivli a0, 2, e8,mf8,tu,mu
+; CHECK-NEXT:    vslideup.vi v26, v25, 1
+; CHECK-NEXT:    vsetivli a0, 2, e8,mf8,ta,mu
+; CHECK-NEXT:    vand.vi v25, v26, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %y = insertelement <2 x i1> %x, i1 %elt, i64 1
+  ret <2 x i1> %y
+}
+
+define <2 x i1> @insertelt_idx_v2i1(<2 x i1> %x, i1 %elt, i32 zeroext %idx) nounwind {
+; RV32-LABEL: insertelt_idx_v2i1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli a2, 2, e8,mf8,ta,mu
+; RV32-NEXT:    vmv.s.x v25, a0
+; RV32-NEXT:    vmv.v.i v26, 0
+; RV32-NEXT:    vmerge.vim v26, v26, 1, v0
+; RV32-NEXT:    addi a0, a1, 1
+; RV32-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
+; RV32-NEXT:    vslideup.vx v26, v25, a1
+; RV32-NEXT:    vsetivli a0, 2, e8,mf8,ta,mu
+; RV32-NEXT:    vand.vi v25, v26, 1
+; RV32-NEXT:    vmsne.vi v0, v25, 0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: insertelt_idx_v2i1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli a2, 2, e8,mf8,ta,mu
+; RV64-NEXT:    vmv.s.x v25, a0
+; RV64-NEXT:    vmv.v.i v26, 0
+; RV64-NEXT:    vmerge.vim v26, v26, 1, v0
+; RV64-NEXT:    sext.w a0, a1
+; RV64-NEXT:    addi a1, a0, 1
+; RV64-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; RV64-NEXT:    vslideup.vx v26, v25, a0
+; RV64-NEXT:    vsetivli a0, 2, e8,mf8,ta,mu
+; RV64-NEXT:    vand.vi v25, v26, 1
+; RV64-NEXT:    vmsne.vi v0, v25, 0
+; RV64-NEXT:    ret
+  %y = insertelement <2 x i1> %x, i1 %elt, i32 %idx
+  ret <2 x i1> %y
+}
+
+define <8 x i1> @insertelt_v8i1(<8 x i1> %x, i1 %elt) nounwind {
+; CHECK-LABEL: insertelt_v8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a1, 8, e8,mf2,ta,mu
+; CHECK-NEXT:    vmv.s.x v25, a0
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v26, v26, 1, v0
+; CHECK-NEXT:    vsetivli a0, 2, e8,mf2,tu,mu
+; CHECK-NEXT:    vslideup.vi v26, v25, 1
+; CHECK-NEXT:    vsetivli a0, 8, e8,mf2,ta,mu
+; CHECK-NEXT:    vand.vi v25, v26, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %y = insertelement <8 x i1> %x, i1 %elt, i64 1
+  ret <8 x i1> %y
+}
+
+define <8 x i1> @insertelt_idx_v8i1(<8 x i1> %x, i1 %elt, i32 zeroext %idx) nounwind {
+; RV32-LABEL: insertelt_idx_v8i1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli a2, 8, e8,mf2,ta,mu
+; RV32-NEXT:    vmv.s.x v25, a0
+; RV32-NEXT:    vmv.v.i v26, 0
+; RV32-NEXT:    vmerge.vim v26, v26, 1, v0
+; RV32-NEXT:    addi a0, a1, 1
+; RV32-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
+; RV32-NEXT:    vslideup.vx v26, v25, a1
+; RV32-NEXT:    vsetivli a0, 8, e8,mf2,ta,mu
+; RV32-NEXT:    vand.vi v25, v26, 1
+; RV32-NEXT:    vmsne.vi v0, v25, 0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: insertelt_idx_v8i1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli a2, 8, e8,mf2,ta,mu
+; RV64-NEXT:    vmv.s.x v25, a0
+; RV64-NEXT:    vmv.v.i v26, 0
+; RV64-NEXT:    vmerge.vim v26, v26, 1, v0
+; RV64-NEXT:    sext.w a0, a1
+; RV64-NEXT:    addi a1, a0, 1
+; RV64-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; RV64-NEXT:    vslideup.vx v26, v25, a0
+; RV64-NEXT:    vsetivli a0, 8, e8,mf2,ta,mu
+; RV64-NEXT:    vand.vi v25, v26, 1
+; RV64-NEXT:    vmsne.vi v0, v25, 0
+; RV64-NEXT:    ret
+  %y = insertelement <8 x i1> %x, i1 %elt, i32 %idx
+  ret <8 x i1> %y
+}
+
+define <64 x i1> @insertelt_v64i1(<64 x i1> %x, i1 %elt) nounwind {
+; CHECK-LABEL: insertelt_v64i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a1, zero, 64
+; CHECK-NEXT:    vsetvli a2, a1, e8,m4,ta,mu
+; CHECK-NEXT:    vmv.s.x v28, a0
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT:    vsetivli a0, 2, e8,m4,tu,mu
+; CHECK-NEXT:    vslideup.vi v8, v28, 1
+; CHECK-NEXT:    vsetvli a0, a1, e8,m4,ta,mu
+; CHECK-NEXT:    vand.vi v28, v8, 1
+; CHECK-NEXT:    vmsne.vi v0, v28, 0
+; CHECK-NEXT:    ret
+  %y = insertelement <64 x i1> %x, i1 %elt, i64 1
+  ret <64 x i1> %y
+}
+
+define <64 x i1> @insertelt_idx_v64i1(<64 x i1> %x, i1 %elt, i32 zeroext %idx) nounwind {
+; RV32-LABEL: insertelt_idx_v64i1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi a2, zero, 64
+; RV32-NEXT:    vsetvli a3, a2, e8,m4,ta,mu
+; RV32-NEXT:    vmv.s.x v28, a0
+; RV32-NEXT:    vmv.v.i v8, 0
+; RV32-NEXT:    vmerge.vim v8, v8, 1, v0
+; RV32-NEXT:    addi a0, a1, 1
+; RV32-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
+; RV32-NEXT:    vslideup.vx v8, v28, a1
+; RV32-NEXT:    vsetvli a0, a2, e8,m4,ta,mu
+; RV32-NEXT:    vand.vi v28, v8, 1
+; RV32-NEXT:    vmsne.vi v0, v28, 0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: insertelt_idx_v64i1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi a2, zero, 64
+; RV64-NEXT:    vsetvli a3, a2, e8,m4,ta,mu
+; RV64-NEXT:    vmv.s.x v28, a0
+; RV64-NEXT:    vmv.v.i v8, 0
+; RV64-NEXT:    vmerge.vim v8, v8, 1, v0
+; RV64-NEXT:    sext.w a0, a1
+; RV64-NEXT:    addi a1, a0, 1
+; RV64-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
+; RV64-NEXT:    vslideup.vx v8, v28, a0
+; RV64-NEXT:    vsetvli a0, a2, e8,m4,ta,mu
+; RV64-NEXT:    vand.vi v28, v8, 1
+; RV64-NEXT:    vmsne.vi v0, v28, 0
+; RV64-NEXT:    ret
+  %y = insertelement <64 x i1> %x, i1 %elt, i32 %idx
+  ret <64 x i1> %y
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-i1.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-i1.ll
new file mode 100644
index 0000000000000..39afff424e1e7
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-i1.ll
@@ -0,0 +1,248 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 1 x i1> @insertelt_nxv1i1(<vscale x 1 x i1> %x, i1 %elt) {
+; CHECK-LABEL: insertelt_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vmv.s.x v25, a0
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v26, v26, 1, v0
+; CHECK-NEXT:    vsetivli a0, 3, e8,mf8,tu,mu
+; CHECK-NEXT:    vslideup.vi v26, v25, 2
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vand.vi v25, v26, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %y = insertelement <vscale x 1 x i1> %x, i1 %elt, i64 2
+  ret <vscale x 1 x i1> %y
+}
+
+define <vscale x 1 x i1> @insertelt_idx_nxv1i1(<vscale x 1 x i1> %x, i1 %elt, i64 %idx) {
+; CHECK-LABEL: insertelt_idx_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vmv.s.x v25, a0
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v26, v26, 1, v0
+; CHECK-NEXT:    addi a0, a1, 1
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vslideup.vx v26, v25, a1
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vand.vi v25, v26, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %y = insertelement <vscale x 1 x i1> %x, i1 %elt, i64 %idx
+  ret <vscale x 1 x i1> %y
+}
+
+define <vscale x 2 x i1> @insertelt_nxv2i1(<vscale x 2 x i1> %x, i1 %elt) {
+; CHECK-LABEL: insertelt_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vmv.s.x v25, a0
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v26, v26, 1, v0
+; CHECK-NEXT:    vsetivli a0, 3, e8,mf4,tu,mu
+; CHECK-NEXT:    vslideup.vi v26, v25, 2
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vand.vi v25, v26, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %y = insertelement <vscale x 2 x i1> %x, i1 %elt, i64 2
+  ret <vscale x 2 x i1> %y
+}
+
+define <vscale x 2 x i1> @insertelt_idx_nxv2i1(<vscale x 2 x i1> %x, i1 %elt, i64 %idx) {
+; CHECK-LABEL: insertelt_idx_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vmv.s.x v25, a0
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v26, v26, 1, v0
+; CHECK-NEXT:    addi a0, a1, 1
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vslideup.vx v26, v25, a1
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vand.vi v25, v26, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %y = insertelement <vscale x 2 x i1> %x, i1 %elt, i64 %idx
+  ret <vscale x 2 x i1> %y
+}
+
+define <vscale x 4 x i1> @insertelt_nxv4i1(<vscale x 4 x i1> %x, i1 %elt) {
+; CHECK-LABEL: insertelt_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vmv.s.x v25, a0
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v26, v26, 1, v0
+; CHECK-NEXT:    vsetivli a0, 3, e8,mf2,tu,mu
+; CHECK-NEXT:    vslideup.vi v26, v25, 2
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vand.vi v25, v26, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %y = insertelement <vscale x 4 x i1> %x, i1 %elt, i64 2
+  ret <vscale x 4 x i1> %y
+}
+
+define <vscale x 4 x i1> @insertelt_idx_nxv4i1(<vscale x 4 x i1> %x, i1 %elt, i64 %idx) {
+; CHECK-LABEL: insertelt_idx_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vmv.s.x v25, a0
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v26, v26, 1, v0
+; CHECK-NEXT:    addi a0, a1, 1
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vslideup.vx v26, v25, a1
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vand.vi v25, v26, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %y = insertelement <vscale x 4 x i1> %x, i1 %elt, i64 %idx
+  ret <vscale x 4 x i1> %y
+}
+
+define <vscale x 8 x i1> @insertelt_nxv8i1(<vscale x 8 x i1> %x, i1 %elt) {
+; CHECK-LABEL: insertelt_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.s.x v25, a0
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v26, v26, 1, v0
+; CHECK-NEXT:    vsetivli a0, 3, e8,m1,tu,mu
+; CHECK-NEXT:    vslideup.vi v26, v25, 2
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vand.vi v25, v26, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %y = insertelement <vscale x 8 x i1> %x, i1 %elt, i64 2
+  ret <vscale x 8 x i1> %y
+}
+
+define <vscale x 8 x i1> @insertelt_idx_nxv8i1(<vscale x 8 x i1> %x, i1 %elt, i64 %idx) {
+; CHECK-LABEL: insertelt_idx_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.s.x v25, a0
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v26, v26, 1, v0
+; CHECK-NEXT:    addi a0, a1, 1
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vslideup.vx v26, v25, a1
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vand.vi v25, v26, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %y = insertelement <vscale x 8 x i1> %x, i1 %elt, i64 %idx
+  ret <vscale x 8 x i1> %y
+}
+
+define <vscale x 16 x i1> @insertelt_nxv16i1(<vscale x 16 x i1> %x, i1 %elt) {
+; CHECK-LABEL: insertelt_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vmv.s.x v26, a0
+; CHECK-NEXT:    vmv.v.i v28, 0
+; CHECK-NEXT:    vmerge.vim v28, v28, 1, v0
+; CHECK-NEXT:    vsetivli a0, 3, e8,m2,tu,mu
+; CHECK-NEXT:    vslideup.vi v28, v26, 2
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vand.vi v26, v28, 1
+; CHECK-NEXT:    vmsne.vi v0, v26, 0
+; CHECK-NEXT:    ret
+  %y = insertelement <vscale x 16 x i1> %x, i1 %elt, i64 2
+  ret <vscale x 16 x i1> %y
+}
+
+define <vscale x 16 x i1> @insertelt_idx_nxv16i1(<vscale x 16 x i1> %x, i1 %elt, i64 %idx) {
+; CHECK-LABEL: insertelt_idx_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vmv.s.x v26, a0
+; CHECK-NEXT:    vmv.v.i v28, 0
+; CHECK-NEXT:    vmerge.vim v28, v28, 1, v0
+; CHECK-NEXT:    addi a0, a1, 1
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vslideup.vx v28, v26, a1
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vand.vi v26, v28, 1
+; CHECK-NEXT:    vmsne.vi v0, v26, 0
+; CHECK-NEXT:    ret
+  %y = insertelement <vscale x 16 x i1> %x, i1 %elt, i64 %idx
+  ret <vscale x 16 x i1> %y
+}
+
+define <vscale x 32 x i1> @insertelt_nxv32i1(<vscale x 32 x i1> %x, i1 %elt) {
+; CHECK-LABEL: insertelt_nxv32i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vmv.s.x v28, a0
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT:    vsetivli a0, 3, e8,m4,tu,mu
+; CHECK-NEXT:    vslideup.vi v8, v28, 2
+; CHECK-NEXT:    vsetvli a0, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vand.vi v28, v8, 1
+; CHECK-NEXT:    vmsne.vi v0, v28, 0
+; CHECK-NEXT:    ret
+  %y = insertelement <vscale x 32 x i1> %x, i1 %elt, i64 2
+  ret <vscale x 32 x i1> %y
+}
+
+define <vscale x 32 x i1> @insertelt_idx_nxv32i1(<vscale x 32 x i1> %x, i1 %elt, i64 %idx) {
+; CHECK-LABEL: insertelt_idx_nxv32i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vmv.s.x v28, a0
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT:    addi a0, a1, 1
+; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vslideup.vx v8, v28, a1
+; CHECK-NEXT:    vsetvli a0, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vand.vi v28, v8, 1
+; CHECK-NEXT:    vmsne.vi v0, v28, 0
+; CHECK-NEXT:    ret
+  %y = insertelement <vscale x 32 x i1> %x, i1 %elt, i64 %idx
+  ret <vscale x 32 x i1> %y
+}
+
+define <vscale x 64 x i1> @insertelt_nxv64i1(<vscale x 64 x i1> %x, i1 %elt) {
+; CHECK-LABEL: insertelt_nxv64i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vmv.s.x v8, a0
+; CHECK-NEXT:    vmv.v.i v16, 0
+; CHECK-NEXT:    vmerge.vim v16, v16, 1, v0
+; CHECK-NEXT:    vsetivli a0, 3, e8,m8,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v8, 2
+; CHECK-NEXT:    vsetvli a0, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vand.vi v8, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %y = insertelement <vscale x 64 x i1> %x, i1 %elt, i64 2
+  ret <vscale x 64 x i1> %y
+}
+
+define <vscale x 64 x i1> @insertelt_idx_nxv64i1(<vscale x 64 x i1> %x, i1 %elt, i64 %idx) {
+; CHECK-LABEL: insertelt_idx_nxv64i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vmv.s.x v8, a0
+; CHECK-NEXT:    vmv.v.i v16, 0
+; CHECK-NEXT:    vmerge.vim v16, v16, 1, v0
+; CHECK-NEXT:    addi a0, a1, 1
+; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v8, a1
+; CHECK-NEXT:    vsetvli a0, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vand.vi v8, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %y = insertelement <vscale x 64 x i1> %x, i1 %elt, i64 %idx
+  ret <vscale x 64 x i1> %y
+}

