[llvm] 461d499 - [RISCV][test] Add precommit test for D132923.
jacquesguan via llvm-commits
llvm-commits at lists.llvm.org
Thu Sep 15 20:17:49 PDT 2022
Author: jacquesguan
Date: 2022-09-16T11:06:04+08:00
New Revision: 461d49909c2c1bd1d72fcfd53799c6e0267aee49
URL: https://github.com/llvm/llvm-project/commit/461d49909c2c1bd1d72fcfd53799c6e0267aee49
DIFF: https://github.com/llvm/llvm-project/commit/461d49909c2c1bd1d72fcfd53799c6e0267aee49.diff
LOG: [RISCV][test] Add precommit test for D132923.
Added:
Modified:
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-peephole-vmerge-vops.ll
llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll
llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-peephole-vmerge-vops.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-peephole-vmerge-vops.ll
index 4274b0e1f2203..bbd6b8e6120c1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-peephole-vmerge-vops.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-peephole-vmerge-vops.ll
@@ -189,3 +189,195 @@ define <8 x i32> @vpmerge_vpload2(<8 x i32> %passthru, <8 x i32> * %p, <8 x i32>
%b = call <8 x i32> @llvm.vp.merge.nxv2i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
ret <8 x i32> %b
}
+
+declare <8 x i16> @llvm.vp.select.nxv2i16(<8 x i1>, <8 x i16>, <8 x i16>, i32)
+declare <8 x i32> @llvm.vp.select.nxv2i32(<8 x i1>, <8 x i32>, <8 x i32>, i32)
+declare <8 x float> @llvm.vp.select.nxv2f32(<8 x i1>, <8 x float>, <8 x float>, i32)
+declare <8 x double> @llvm.vp.select.nxv2f64(<8 x i1>, <8 x double>, <8 x double>, i32)
+
+; Test binary operator with vp.select and vp.add.
+define <8 x i32> @vpselect_vpadd(<8 x i32> %passthru, <8 x i32> %x, <8 x i32> %y, <8 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_vpadd:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vadd.vv v9, v9, v10
+; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: ret
+ %splat = insertelement <8 x i1> poison, i1 -1, i32 0
+ %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
+ %a = call <8 x i32> @llvm.vp.add.nxv2i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> %mask, i32 %vl)
+ %b = call <8 x i32> @llvm.vp.select.nxv2i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
+ ret <8 x i32> %b
+}
+
+; Test that the glued node of the select is not deleted.
+define <8 x i32> @vpselect_vpadd2(<8 x i32> %passthru, <8 x i32> %x, <8 x i32> %y, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_vpadd2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vmseq.vv v0, v9, v10
+; CHECK-NEXT: vadd.vv v9, v9, v10
+; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: ret
+ %splat = insertelement <8 x i1> poison, i1 -1, i32 0
+ %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
+ %a = call <8 x i32> @llvm.vp.add.nxv2i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> %mask, i32 %vl)
+ %m = call <8 x i1> @llvm.vp.icmp.nxv2i32(<8 x i32> %x, <8 x i32> %y, metadata !"eq", <8 x i1> %mask, i32 %vl)
+ %b = call <8 x i32> @llvm.vp.select.nxv2i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
+ ret <8 x i32> %b
+}
+
+; Test vp.select has all-ones mask.
+define <8 x i32> @vpselect_vpadd3(<8 x i32> %passthru, <8 x i32> %x, <8 x i32> %y, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_vpadd3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf4, ta, mu
+; CHECK-NEXT: vmset.m v0
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vadd.vv v9, v9, v10
+; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: ret
+ %splat = insertelement <8 x i1> poison, i1 -1, i32 0
+ %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
+ %a = call <8 x i32> @llvm.vp.add.nxv2i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> %mask, i32 %vl)
+ %b = call <8 x i32> @llvm.vp.select.nxv2i32(<8 x i1> %mask, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
+ ret <8 x i32> %b
+}
+
+; Test float binary operator with vp.select and vp.fadd.
+define <8 x float> @vpselect_vpfadd(<8 x float> %passthru, <8 x float> %x, <8 x float> %y, <8 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_vpfadd:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vfadd.vv v9, v9, v10
+; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: ret
+ %splat = insertelement <8 x i1> poison, i1 -1, i32 0
+ %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
+ %a = call <8 x float> @llvm.vp.fadd.nxv2f32(<8 x float> %x, <8 x float> %y, <8 x i1> %mask, i32 %vl)
+ %b = call <8 x float> @llvm.vp.select.nxv2f32(<8 x i1> %m, <8 x float> %a, <8 x float> %passthru, i32 %vl)
+ ret <8 x float> %b
+}
+
+; Test conversion by fptosi.
+define <8 x i16> @vpselect_vpfptosi(<8 x i16> %passthru, <8 x float> %x, <8 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_vpfptosi:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v9
+; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0
+; CHECK-NEXT: ret
+ %splat = insertelement <8 x i1> poison, i1 -1, i32 0
+ %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
+ %a = call <8 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f32(<8 x float> %x, <8 x i1> %mask, i32 %vl)
+ %b = call <8 x i16> @llvm.vp.select.nxv2i16(<8 x i1> %m, <8 x i16> %a, <8 x i16> %passthru, i32 %vl)
+ ret <8 x i16> %b
+}
+
+; Test conversion by sitofp.
+define <8 x float> @vpselect_vpsitofp(<8 x float> %passthru, <8 x i64> %x, <8 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_vpsitofp:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vfncvt.f.x.w v9, v10
+; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: ret
+ %splat = insertelement <8 x i1> poison, i1 -1, i32 0
+ %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
+ %a = call <8 x float> @llvm.vp.sitofp.nxv2f32.nxv2i64(<8 x i64> %x, <8 x i1> %mask, i32 %vl)
+ %b = call <8 x float> @llvm.vp.select.nxv2f32(<8 x i1> %m, <8 x float> %a, <8 x float> %passthru, i32 %vl)
+ ret <8 x float> %b
+}
+
+; Test integer extension by vp.zext.
+define <8 x i32> @vpselect_vpzext(<8 x i32> %passthru, <8 x i8> %x, <8 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_vpzext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vzext.vf4 v10, v9
+; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0
+; CHECK-NEXT: ret
+ %splat = insertelement <8 x i1> poison, i1 -1, i32 0
+ %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
+ %a = call <8 x i32> @llvm.vp.zext.nxv2i32.nxv2i8(<8 x i8> %x, <8 x i1> %mask, i32 %vl)
+ %b = call <8 x i32> @llvm.vp.select.nxv2i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
+ ret <8 x i32> %b
+}
+
+; Test integer truncation by vp.trunc.
+define <8 x i32> @vpselect_vptrunc(<8 x i32> %passthru, <8 x i64> %x, <8 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_vptrunc:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vnsrl.wi v9, v10, 0
+; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: ret
+ %splat = insertelement <8 x i1> poison, i1 -1, i32 0
+ %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
+ %a = call <8 x i32> @llvm.vp.trunc.nxv2i32.nxv2i64(<8 x i64> %x, <8 x i1> %mask, i32 %vl)
+ %b = call <8 x i32> @llvm.vp.select.nxv2i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
+ ret <8 x i32> %b
+}
+
+; Test float extension by vp.fpext.
+define <8 x double> @vpselect_vpfpext(<8 x double> %passthru, <8 x float> %x, <8 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_vpfpext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vfwcvt.f.f.v v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0
+; CHECK-NEXT: ret
+ %splat = insertelement <8 x i1> poison, i1 -1, i32 0
+ %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
+ %a = call <8 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<8 x float> %x, <8 x i1> %mask, i32 %vl)
+ %b = call <8 x double> @llvm.vp.select.nxv2f64(<8 x i1> %m, <8 x double> %a, <8 x double> %passthru, i32 %vl)
+ ret <8 x double> %b
+}
+
+; Test float truncation by vp.fptrunc.
+define <8 x float> @vpselect_vpfptrunc(<8 x float> %passthru, <8 x double> %x, <8 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_vpfptrunc:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vfncvt.f.f.w v9, v10
+; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: ret
+ %splat = insertelement <8 x i1> poison, i1 -1, i32 0
+ %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
+ %a = call <8 x float> @llvm.vp.fptrunc.nxv2f32.nxv2f64(<8 x double> %x, <8 x i1> %mask, i32 %vl)
+ %b = call <8 x float> @llvm.vp.select.nxv2f32(<8 x i1> %m, <8 x float> %a, <8 x float> %passthru, i32 %vl)
+ ret <8 x float> %b
+}
+
+; Test load operation by vp.load.
+define <8 x i32> @vpselect_vpload(<8 x i32> %passthru, <8 x i32> * %p, <8 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_vpload:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vle32.v v9, (a0)
+; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: ret
+ %splat = insertelement <8 x i1> poison, i1 -1, i32 0
+ %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
+ %a = call <8 x i32> @llvm.vp.load.nxv2i32.p0nxv2i32(<8 x i32> * %p, <8 x i1> %mask, i32 %vl)
+ %b = call <8 x i32> @llvm.vp.select.nxv2i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
+ ret <8 x i32> %b
+}
+
+; Test result has chain and glued node.
+define <8 x i32> @vpselect_vpload2(<8 x i32> %passthru, <8 x i32> * %p, <8 x i32> %x, <8 x i32> %y, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_vpload2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vle32.v v11, (a0)
+; CHECK-NEXT: vmseq.vv v0, v9, v10
+; CHECK-NEXT: vmerge.vvm v8, v8, v11, v0
+; CHECK-NEXT: ret
+ %splat = insertelement <8 x i1> poison, i1 -1, i32 0
+ %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
+ %a = call <8 x i32> @llvm.vp.load.nxv2i32.p0nxv2i32(<8 x i32> * %p, <8 x i1> %mask, i32 %vl)
+ %m = call <8 x i1> @llvm.vp.icmp.nxv2i32(<8 x i32> %x, <8 x i32> %y, metadata !"eq", <8 x i1> %mask, i32 %vl)
+ %b = call <8 x i32> @llvm.vp.select.nxv2i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
+ ret <8 x i32> %b
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll
index 8898e316781dd..242df3423eb74 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll
@@ -2,6 +2,7 @@
; RUN: llc < %s -mtriple=riscv64 -mattr=+v -stop-after=finalize-isel | FileCheck %s
declare <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32)
+declare <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32)
declare <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0nxv2i32(<vscale x 2 x i32> *, <vscale x 2 x i1>, i32)
; Test result has chain output of true operand of merge.vvm.
@@ -15,7 +16,7 @@ define void @vpmerge_vpload_store(<vscale x 2 x i32> %passthru, <vscale x 2 x i3
; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
; CHECK-NEXT: [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLE32_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 0
+ ; CHECK-NEXT: [[PseudoVLE32_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 0 /* tu, mu */
; CHECK-NEXT: VS1R_V killed [[PseudoVLE32_V_M1_MASK]], [[COPY2]] :: (store unknown-size into %ir.p, align 8)
; CHECK-NEXT: PseudoRET
%splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
@@ -25,3 +26,25 @@ define void @vpmerge_vpload_store(<vscale x 2 x i32> %passthru, <vscale x 2 x i3
store <vscale x 2 x i32> %b, <vscale x 2 x i32> * %p
ret void
}
+
+define void @vpselect_vpload_store(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i1> %m, i32 zeroext %vl) {
+ ; CHECK-LABEL: name: vpselect_vpload_store
+ ; CHECK: bb.0 (%ir-block.0):
+ ; CHECK-NEXT: liveins: $v8, $x10, $v0, $x11
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v8
+ ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 [[COPY2]], [[COPY]], 5 /* e32 */ :: (load unknown-size from %ir.p, align 8)
+ ; CHECK-NEXT: $v0 = COPY [[COPY1]]
+ ; CHECK-NEXT: [[PseudoVMERGE_VVM_M1_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_M1 [[COPY3]], killed [[PseudoVLE32_V_M1_]], $v0, [[COPY]], 5 /* e32 */
+ ; CHECK-NEXT: VS1R_V killed [[PseudoVMERGE_VVM_M1_]], [[COPY2]] :: (store unknown-size into %ir.p, align 8)
+ ; CHECK-NEXT: PseudoRET
+ %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
+ %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0nxv2i32(<vscale x 2 x i32> * %p, <vscale x 2 x i1> %mask, i32 %vl)
+ %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
+ store <vscale x 2 x i32> %b, <vscale x 2 x i32> * %p
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
index 49d240ae57a2e..ae099eb9b0a09 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
@@ -432,3 +432,453 @@ define <vscale x 2 x i32> @vpmerge_trunc(<vscale x 2 x i32> %passthru, <vscale x
%b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
ret <vscale x 2 x i32> %b
}
+
+declare <vscale x 2 x i16> @llvm.vp.select.nxv2i16(<vscale x 2 x i1>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32)
+declare <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32)
+declare <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1>, <vscale x 2 x float>, <vscale x 2 x float>, i32)
+declare <vscale x 2 x double> @llvm.vp.select.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>, i32)
+
+; Test binary operator with vp.select and vp.add.
+define <vscale x 2 x i32> @vpselect_vpadd(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_vpadd:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vadd.vv v9, v9, v10
+; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: ret
+ %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
+ %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %a = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %mask, i32 %vl)
+ %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
+ ret <vscale x 2 x i32> %b
+}
+
+; Test that the glued node of the select is not deleted.
+define <vscale x 2 x i32> @vpselect_vpadd2(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_vpadd2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vmseq.vv v0, v9, v10
+; CHECK-NEXT: vadd.vv v9, v9, v10
+; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: ret
+ %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
+ %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %a = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %mask, i32 %vl)
+ %m = call <vscale x 2 x i1> @llvm.vp.icmp.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, metadata !"eq", <vscale x 2 x i1> %mask, i32 %vl)
+ %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
+ ret <vscale x 2 x i32> %b
+}
+
+; Test vp.select has all-ones mask.
+define <vscale x 2 x i32> @vpselect_vpadd3(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_vpadd3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
+; CHECK-NEXT: vmset.m v0
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vadd.vv v9, v9, v10
+; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: ret
+ %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
+ %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %a = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %mask, i32 %vl)
+ %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
+ ret <vscale x 2 x i32> %b
+}
+
+; Test float binary operator with vp.select and vp.fadd.
+define <vscale x 2 x float> @vpselect_vpfadd(<vscale x 2 x float> %passthru, <vscale x 2 x float> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_vpfadd:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vfadd.vv v9, v9, v10
+; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: ret
+ %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
+ %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %a = call <vscale x 2 x float> @llvm.vp.fadd.nxv2f32(<vscale x 2 x float> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> %mask, i32 %vl)
+ %b = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
+ ret <vscale x 2 x float> %b
+}
+
+; Test binary operator with a specific EEW using riscv.vrgatherei16.
+define <vscale x 2 x i32> @vpselect_vrgatherei16(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i16> %y, <vscale x 2 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_vrgatherei16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vrgatherei16.vv v8, v9, v10
+; CHECK-NEXT: ret
+ %1 = zext i32 %vl to i64
+ %2 = tail call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> %x, <vscale x 2 x i16> %y, i64 %1)
+ %3 = tail call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %2, <vscale x 2 x i32> %passthru, i32 %vl)
+ ret <vscale x 2 x i32> %2
+}
+
+; Test conversion by fptosi.
+define <vscale x 2 x i16> @vpselect_vpfptosi(<vscale x 2 x i16> %passthru, <vscale x 2 x float> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_vpfptosi:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v9
+; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0
+; CHECK-NEXT: ret
+ %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
+ %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %a = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f32(<vscale x 2 x float> %x, <vscale x 2 x i1> %mask, i32 %vl)
+ %b = call <vscale x 2 x i16> @llvm.vp.select.nxv2i16(<vscale x 2 x i1> %m, <vscale x 2 x i16> %a, <vscale x 2 x i16> %passthru, i32 %vl)
+ ret <vscale x 2 x i16> %b
+}
+
+; Test conversion by sitofp.
+define <vscale x 2 x float> @vpselect_vpsitofp(<vscale x 2 x float> %passthru, <vscale x 2 x i64> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_vpsitofp:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vfncvt.f.x.w v9, v10
+; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: ret
+ %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
+ %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %a = call <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i1> %mask, i32 %vl)
+ %b = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
+ ret <vscale x 2 x float> %b
+}
+
+; Test integer extension by vp.zext.
+define <vscale x 2 x i32> @vpselect_vpzext(<vscale x 2 x i32> %passthru, <vscale x 2 x i8> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_vpzext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vzext.vf4 v10, v9
+; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0
+; CHECK-NEXT: ret
+ %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
+ %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %a = call <vscale x 2 x i32> @llvm.vp.zext.nxv2i32.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i1> %mask, i32 %vl)
+ %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
+ ret <vscale x 2 x i32> %b
+}
+
+; Test integer truncation by vp.trunc.
+define <vscale x 2 x i32> @vpselect_vptrunc(<vscale x 2 x i32> %passthru, <vscale x 2 x i64> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_vptrunc:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vnsrl.wi v9, v10, 0
+; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: ret
+ %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
+ %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %a = call <vscale x 2 x i32> @llvm.vp.trunc.nxv2i32.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i1> %mask, i32 %vl)
+ %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
+ ret <vscale x 2 x i32> %b
+}
+
+; Test float extension by vp.fpext.
+define <vscale x 2 x double> @vpselect_vpfpext(<vscale x 2 x double> %passthru, <vscale x 2 x float> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_vpfpext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vfwcvt.f.f.v v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0
+; CHECK-NEXT: ret
+ %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
+ %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %a = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %x, <vscale x 2 x i1> %mask, i32 %vl)
+ %b = call <vscale x 2 x double> @llvm.vp.select.nxv2f64(<vscale x 2 x i1> %m, <vscale x 2 x double> %a, <vscale x 2 x double> %passthru, i32 %vl)
+ ret <vscale x 2 x double> %b
+}
+
+; Test float truncation by vp.fptrunc.
+define <vscale x 2 x float> @vpselect_vpfptrunc(<vscale x 2 x float> %passthru, <vscale x 2 x double> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_vpfptrunc:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vfncvt.f.f.w v9, v10
+; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: ret
+ %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
+ %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %a = call <vscale x 2 x float> @llvm.vp.fptrunc.nxv2f32.nxv2f64(<vscale x 2 x double> %x, <vscale x 2 x i1> %mask, i32 %vl)
+ %b = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
+ ret <vscale x 2 x float> %b
+}
+
+; Test load operation by vp.load.
+define <vscale x 2 x i32> @vpselect_vpload(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_vpload:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vle32.v v9, (a0)
+; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: ret
+ %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
+ %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0nxv2i32(<vscale x 2 x i32> * %p, <vscale x 2 x i1> %mask, i32 %vl)
+ %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
+ ret <vscale x 2 x i32> %b
+}
+
+; Test result has chain and glued node.
+define <vscale x 2 x i32> @vpselect_vpload2(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_vpload2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vle32.v v11, (a0)
+; CHECK-NEXT: vmseq.vv v0, v9, v10
+; CHECK-NEXT: vmerge.vvm v8, v8, v11, v0
+; CHECK-NEXT: ret
+ %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
+ %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0nxv2i32(<vscale x 2 x i32> * %p, <vscale x 2 x i1> %mask, i32 %vl)
+ %m = call <vscale x 2 x i1> @llvm.vp.icmp.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, metadata !"eq", <vscale x 2 x i1> %mask, i32 %vl)
+ %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
+ ret <vscale x 2 x i32> %b
+}
+
+; Test result has chain output of true operand of select.vvm.
+define void @vpselect_vpload_store(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_vpload_store:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vle32.v v9, (a0)
+; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: vs1r.v v8, (a0)
+; CHECK-NEXT: ret
+ %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
+ %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0nxv2i32(<vscale x 2 x i32> * %p, <vscale x 2 x i1> %mask, i32 %vl)
+ %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
+ store <vscale x 2 x i32> %b, <vscale x 2 x i32> * %p
+ ret void
+}
+
+; FIXME: select vselect.vvm and vleffN.v
+define <vscale x 2 x i32> @vpselect_vleff(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_vleff:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vle32ff.v v9, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: ret
+ %1 = zext i32 %vl to i64
+ %a = call { <vscale x 2 x i32>, i64 } @llvm.riscv.vleff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %p, i64 %1)
+ %b = extractvalue { <vscale x 2 x i32>, i64 } %a, 0
+ %c = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %b, <vscale x 2 x i32> %passthru, i32 %vl)
+ ret <vscale x 2 x i32> %c
+}
+
+; Test strided load by riscv.vlse
+define <vscale x 2 x i32> @vpselect_vlse(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i1> %m, i64 %s, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_vlse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-NEXT: vlse32.v v9, (a0), a1
+; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: ret
+ %1 = zext i32 %vl to i64
+ %a = call <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %p, i64 %s, i64 %1)
+ %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
+ ret <vscale x 2 x i32> %b
+}
+
+; Test indexed load by riscv.vluxei
+define <vscale x 2 x i32> @vpselect_vluxei(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i64> %idx, <vscale x 2 x i1> %m, i64 %s, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_vluxei:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-NEXT: vluxei64.v v9, (a0), v10
+; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: ret
+ %1 = zext i32 %vl to i64
+ %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %p, <vscale x 2 x i64> %idx, i64 %1)
+ %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
+ ret <vscale x 2 x i32> %b
+}
+
+; Test vector index by riscv.vid
+define <vscale x 2 x i32> @vpselect_vid(<vscale x 2 x i32> %passthru, <vscale x 2 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_vid:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vid.v v9
+; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: ret
+ %1 = zext i32 %vl to i64
+ %a = call <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32(<vscale x 2 x i32> undef, i64 %1)
+ %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
+ ret <vscale x 2 x i32> %b
+}
+
+; Test riscv.viota
+define <vscale x 2 x i32> @vpselect_viota(<vscale x 2 x i32> %passthru, <vscale x 2 x i1> %m, <vscale x 2 x i1> %vm, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_viota:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: viota.m v10, v9
+; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0
+; CHECK-NEXT: ret
+ %1 = zext i32 %vl to i64
+ %a = call <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i1> %vm, i64 %1)
+ %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
+ ret <vscale x 2 x i32> %b
+}
+
+; Test riscv.vfclass
+define <vscale x 2 x i32> @vpselect_vfclass(<vscale x 2 x i32> %passthru, <vscale x 2 x float> %vf, <vscale x 2 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_vfclass:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vfclass.v v9, v9
+; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: ret
+ %1 = zext i32 %vl to i64
+ %a = call <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x float> %vf, i64 %1)
+ %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
+ ret <vscale x 2 x i32> %b
+}
+
+; Test riscv.vfsqrt
+define <vscale x 2 x float> @vpselect_vfsqrt(<vscale x 2 x float> %passthru, <vscale x 2 x float> %vf, <vscale x 2 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_vfsqrt:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vfsqrt.v v9, v9
+; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: ret
+ %1 = zext i32 %vl to i64
+ %a = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> %vf, i64 %1)
+ %b = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
+ ret <vscale x 2 x float> %b
+}
+
+; Test reciprocal operation by riscv.vfrec7
+define <vscale x 2 x float> @vpselect_vfrec7(<vscale x 2 x float> %passthru, <vscale x 2 x float> %vf, <vscale x 2 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_vfrec7:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vfrec7.v v9, v9
+; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: ret
+ %1 = zext i32 %vl to i64
+ %a = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> %vf, i64 %1)
+ %b = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
+ ret <vscale x 2 x float> %b
+}
+
+; Test vector operations with VLMAX vector length.
+
+; Test binary operator with vp.select and add.
+define <vscale x 2 x i32> @vpselect_add(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_add:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT: vadd.vv v9, v9, v10
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: ret
+ %a = add <vscale x 2 x i32> %x, %y
+ %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
+ ret <vscale x 2 x i32> %b
+}
+
+; Test binary operator with vp.select and fadd.
+define <vscale x 2 x float> @vpselect_fadd(<vscale x 2 x float> %passthru, <vscale x 2 x float> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_fadd:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT: vfadd.vv v9, v9, v10
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: ret
+ %a = fadd <vscale x 2 x float> %x, %y
+ %b = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
+ ret <vscale x 2 x float> %b
+}
+
+; Test conversion by fptosi.
+define <vscale x 2 x i16> @vpselect_fptosi(<vscale x 2 x i16> %passthru, <vscale x 2 x float> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_fptosi:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
+; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0
+; CHECK-NEXT: ret
+ %a = fptosi <vscale x 2 x float> %x to <vscale x 2 x i16>
+ %b = call <vscale x 2 x i16> @llvm.vp.select.nxv2i16(<vscale x 2 x i1> %m, <vscale x 2 x i16> %a, <vscale x 2 x i16> %passthru, i32 %vl)
+ ret <vscale x 2 x i16> %b
+}
+
+; Test conversion by sitofp.
+define <vscale x 2 x float> @vpselect_sitofp(<vscale x 2 x float> %passthru, <vscale x 2 x i64> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_sitofp:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT: vfncvt.f.x.w v9, v10
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: ret
+ %a = sitofp <vscale x 2 x i64> %x to <vscale x 2 x float>
+ %b = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
+ ret <vscale x 2 x float> %b
+}
+
+; Test float extension by fpext.
+define <vscale x 2 x double> @vpselect_fpext(<vscale x 2 x double> %passthru, <vscale x 2 x float> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_fpext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT: vfwcvt.f.f.v v12, v10
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0
+; CHECK-NEXT: ret
+ %a = fpext <vscale x 2 x float> %x to <vscale x 2 x double>
+ %b = call <vscale x 2 x double> @llvm.vp.select.nxv2f64(<vscale x 2 x i1> %m, <vscale x 2 x double> %a, <vscale x 2 x double> %passthru, i32 %vl)
+ ret <vscale x 2 x double> %b
+}
+
+; Test float truncation by fptrunc.
+define <vscale x 2 x float> @vpselect_fptrunc(<vscale x 2 x float> %passthru, <vscale x 2 x double> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_fptrunc:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT: vfncvt.f.f.w v9, v10
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: ret
+ %a = fptrunc <vscale x 2 x double> %x to <vscale x 2 x float>
+ %b = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
+ ret <vscale x 2 x float> %b
+}
+
+; Test integer extension by zext.
+define <vscale x 2 x i32> @vpselect_zext(<vscale x 2 x i32> %passthru, <vscale x 2 x i8> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_zext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT: vzext.vf4 v10, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0
+; CHECK-NEXT: ret
+ %a = zext <vscale x 2 x i8> %x to <vscale x 2 x i32>
+ %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
+ ret <vscale x 2 x i32> %b
+}
+
+; Test integer truncation by trunc.
+define <vscale x 2 x i32> @vpselect_trunc(<vscale x 2 x i32> %passthru, <vscale x 2 x i64> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vpselect_trunc:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT: vnsrl.wi v9, v10, 0
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: ret
+ %a = trunc <vscale x 2 x i64> %x to <vscale x 2 x i32>
+ %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
+ ret <vscale x 2 x i32> %b
+}