[llvm-branch-commits] [llvm] b02eab9 - [RISCV] Add scalable vector icmp ISel patterns
Fraser Cormack via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Sat Jan 9 13:06:06 PST 2021
Author: Fraser Cormack
Date: 2021-01-09T20:54:34Z
New Revision: b02eab9058e58782fca32dd8b1e53c27ed93f866
URL: https://github.com/llvm/llvm-project/commit/b02eab9058e58782fca32dd8b1e53c27ed93f866
DIFF: https://github.com/llvm/llvm-project/commit/b02eab9058e58782fca32dd8b1e53c27ed93f866.diff
LOG: [RISCV] Add scalable vector icmp ISel patterns
Original patch by @rogfer01.
The RVV integer comparison instructions are defined in such a way that
many LLVM operations can be supported by using the "opposite" comparison
instruction and swapping the operands. This patch does so in most cases,
except for the mappings where the immediate range would have to be
adjusted to accommodate the rewrite:
va < i --> vmsle{u}.vi vd, va, i-1, vm
va >= i --> vmsgt{u}.vi vd, va, i-1, vm
That is left for future optimization; this patch supports all operations,
but for the missing mappings the immediate is first moved into a scalar
register.
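As an illustrative sketch (not part of the patch; the v16/v17 register
assignment is taken from the tests below), a signed greater-than has no
dedicated vv instruction and is selected as the opposite less-than with
the operands swapped:
%vc = icmp sgt <vscale x 8 x i8> %va, %vb --> vmslt.vv v0, v17, v16 (i.e. vb < va)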
Since there are so many condition codes and operand cases to check, the
test burden is reduced by testing only the "vscale x 8" vector types.
Authored-by: Roger Ferrer Ibanez <rofirrim at gmail.com>
Co-Authored-by: Fraser Cormack <fraser at codeplay.com>
Reviewed By: craig.topper
Differential Revision: https://reviews.llvm.org/D94168
Added:
llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv32.ll
llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv64.ll
Modified:
llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index e158b632aa73..0ef798937a66 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -35,6 +35,10 @@ def SplatPat : ComplexPattern<vAny, 1, "selectVSplat", [], [], 1>;
def SplatPat_simm5 : ComplexPattern<vAny, 1, "selectVSplatSimm5", []>;
def SplatPat_uimm5 : ComplexPattern<vAny, 1, "selectVSplatUimm5", []>;
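+// SwapHelper builds one instruction dag from its pieces, optionally
+// swapping operands A and B: !if(swap, B, A) picks the operand order and
+// !con splices Prefix, the two operands and Suffix into a single dag.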
+class SwapHelper<dag Prefix, dag A, dag B, dag Suffix, bit swap> {
+ dag Value = !con(Prefix, !if(swap, B, A), !if(swap, A, B), Suffix);
+}
+
multiclass VPatUSLoadStoreSDNode<LLVMType type,
LLVMType mask_type,
int sew,
@@ -128,6 +132,66 @@ multiclass VPatBinarySDNode_VV_VX_VI<SDNode vop, string instruction_name,
}
}
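+// Selects a vector-vector setcc to the given pseudo; 'swap' reverses the
+// operand order for condition codes with no direct vv instruction.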
+multiclass VPatIntegerSetCCSDNode_VV<CondCode cc,
+ string instruction_name,
+ bit swap = 0> {
+ foreach vti = AllIntegerVectors in {
+ defvar instruction = !cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX);
+ def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1),
+ (vti.Vector vti.RegClass:$rs2), cc)),
+ SwapHelper<(instruction),
+ (instruction vti.RegClass:$rs1),
+ (instruction vti.RegClass:$rs2),
+ (instruction VLMax, vti.SEW),
+ swap>.Value>;
+ }
+}
+
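+// Selects a vector-scalar or vector-immediate setcc: 'kind' is the pseudo
+// suffix ("VX" or "VI") and 'xop_kind' the scalar operand class (e.g. GPR
+// or simm5), with SplatPatKind matching a splat of that operand.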
+multiclass VPatIntegerSetCCSDNode_XI<CondCode cc,
+ string instruction_name,
+ string kind,
+ ComplexPattern SplatPatKind,
+ DAGOperand xop_kind,
+ bit swap = 0> {
+ foreach vti = AllIntegerVectors in {
+    defvar instruction = !cast<Instruction>(instruction_name#"_"#kind#"_"#vti.LMul.MX);
+ def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1),
+ (vti.Vector (SplatPatKind xop_kind:$rs2)), cc)),
+ SwapHelper<(instruction),
+ (instruction vti.RegClass:$rs1),
+ (instruction xop_kind:$rs2),
+ (instruction VLMax, vti.SEW),
+ swap>.Value>;
+ }
+}
+
+multiclass VPatIntegerSetCCSDNode_VV_VX_VI<CondCode cc,
+ string instruction_name,
+ bit swap = 0> {
+ defm : VPatIntegerSetCCSDNode_VV<cc, instruction_name, swap>;
+ defm : VPatIntegerSetCCSDNode_XI<cc, instruction_name, "VX",
+ SplatPat, GPR, swap>;
+ defm : VPatIntegerSetCCSDNode_XI<cc, instruction_name, "VI",
+ SplatPat_simm5, simm5, swap>;
+}
+
+multiclass VPatIntegerSetCCSDNode_VV_VX<CondCode cc,
+ string instruction_name,
+ bit swap = 0> {
+ defm : VPatIntegerSetCCSDNode_VV<cc, instruction_name, swap>;
+ defm : VPatIntegerSetCCSDNode_XI<cc, instruction_name, "VX",
+ SplatPat, GPR, swap>;
+}
+
+multiclass VPatIntegerSetCCSDNode_VX_VI<CondCode cc,
+ string instruction_name,
+ bit swap = 0> {
+ defm : VPatIntegerSetCCSDNode_XI<cc, instruction_name, "VX",
+ SplatPat, GPR, swap>;
+ defm : VPatIntegerSetCCSDNode_XI<cc, instruction_name, "VI",
+ SplatPat_simm5, simm5, swap>;
+}
+
//===----------------------------------------------------------------------===//
// Patterns.
//===----------------------------------------------------------------------===//
@@ -164,6 +228,28 @@ defm "" : VPatBinarySDNode_VV_VX_VI<shl, "PseudoVSLL", uimm5>;
defm "" : VPatBinarySDNode_VV_VX_VI<srl, "PseudoVSRL", uimm5>;
defm "" : VPatBinarySDNode_VV_VX_VI<sra, "PseudoVSRA", uimm5>;
+// 12.8. Vector Integer Comparison Instructions
+defm "" : VPatIntegerSetCCSDNode_VV_VX_VI<SETEQ, "PseudoVMSEQ">;
+defm "" : VPatIntegerSetCCSDNode_VV_VX_VI<SETNE, "PseudoVMSNE">;
+
+// FIXME: Support immediate forms of these by choosing SLE and decrementing
+// the immediate
+defm "" : VPatIntegerSetCCSDNode_VV_VX<SETLT, "PseudoVMSLT">;
+defm "" : VPatIntegerSetCCSDNode_VV_VX<SETULT, "PseudoVMSLTU">;
+
+defm "" : VPatIntegerSetCCSDNode_VV<SETGT, "PseudoVMSLT", /*swap*/1>;
+defm "" : VPatIntegerSetCCSDNode_VV<SETUGT, "PseudoVMSLTU", /*swap*/1>;
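+// Unlike the vv forms, vmsgt{u}.vx and vmsgt{u}.vi exist as real RVV
+// instructions, so the scalar and immediate forms need no operand swap.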
+defm "" : VPatIntegerSetCCSDNode_VX_VI<SETGT, "PseudoVMSGT">;
+defm "" : VPatIntegerSetCCSDNode_VX_VI<SETUGT, "PseudoVMSGTU">;
+
+defm "" : VPatIntegerSetCCSDNode_VV_VX_VI<SETLE, "PseudoVMSLE">;
+defm "" : VPatIntegerSetCCSDNode_VV_VX_VI<SETULE, "PseudoVMSLEU">;
+
+// FIXME: Support immediate forms of these by choosing SGT and decrementing the
+// immediate
+defm "" : VPatIntegerSetCCSDNode_VV<SETGE, "PseudoVMSLE", /*swap*/1>;
+defm "" : VPatIntegerSetCCSDNode_VV<SETUGE, "PseudoVMSLEU", /*swap*/1>;
+
// 12.9. Vector Integer Min/Max Instructions
defm "" : VPatBinarySDNode_VV_VX<umin, "PseudoVMINU">;
defm "" : VPatBinarySDNode_VV_VX<smin, "PseudoVMIN">;
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv32.ll
new file mode 100644
index 000000000000..85562131f08b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv32.ll
@@ -0,0 +1,3128 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+; FIXME: The scalar/vector operations ('xv' and 'iv' tests) should swap
+; operands and condition codes accordingly in order to generate a 'vx' or 'vi'
+; instruction.
+
+define <vscale x 8 x i1> @icmp_eq_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_eq_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmseq.vv v0, v16, v17
+; CHECK-NEXT: ret
+ %vc = icmp eq <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_eq_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmseq.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_eq_xv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmseq.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 0, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_eq_iv_nxv8i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_ne_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsne.vv v0, v16, v17
+; CHECK-NEXT: ret
+ %vc = icmp ne <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ne_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsne.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ne_xv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsne.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ne_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsne.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_ugt_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsltu.vv v0, v17, v16
+; CHECK-NEXT: ret
+ %vc = icmp ugt <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ugt_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsgtu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ugt_xv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsltu.vv v0, v16, v25
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ugt_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_uge_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsleu.vv v0, v17, v16
+; CHECK-NEXT: ret
+ %vc = icmp uge <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_uge_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsleu.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_uge_xv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsleu.vv v0, v16, v25
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.i v25, -16
+; CHECK-NEXT: vmsleu.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.i v25, 15
+; CHECK-NEXT: vmsleu.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 15, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_uge_iv_nxv8i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v16, 15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 15, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_2(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i8_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmset.m v0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 0, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_3(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i8_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.i v25, 1
+; CHECK-NEXT: vmsleu.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 1, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_4(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i8_4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.i v25, -15
+; CHECK-NEXT: vmsleu.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_5(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i8_5:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsleu.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 16, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_ult_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsltu.vv v0, v16, v17
+; CHECK-NEXT: ret
+ %vc = icmp ult <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ult_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ult_xv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsltu.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -16
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -15
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ult_iv_nxv8i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_2(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i8_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmclr.m v0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 0, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_3(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i8_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 1, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_4(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i8_4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 16, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_ule_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsleu.vv v0, v16, v17
+; CHECK-NEXT: ret
+ %vc = icmp ule <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ule_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsleu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ule_xv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsleu.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ule_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_sgt_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmslt.vv v0, v17, v16
+; CHECK-NEXT: ret
+ %vc = icmp sgt <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_sgt_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsgt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_sgt_xv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmslt.vv v0, v16, v25
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_sgt_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_sge_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsle.vv v0, v17, v16
+; CHECK-NEXT: ret
+ %vc = icmp sge <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_sge_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsle.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_sge_xv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsle.vv v0, v16, v25
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.i v25, -16
+; CHECK-NEXT: vmsle.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.i v25, -15
+; CHECK-NEXT: vmsle.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_sge_iv_nxv8i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i8_2(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i8_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.i v25, 0
+; CHECK-NEXT: vmsle.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 0, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i8_3(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i8_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsle.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 16, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_slt_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmslt.vv v0, v16, v17
+; CHECK-NEXT: ret
+ %vc = icmp slt <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_slt_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_slt_xv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmslt.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -16
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -15
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_slt_iv_nxv8i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i8_2(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i8_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, zero
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 0, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i8_3(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i8_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 16, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_sle_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsle.vv v0, v16, v17
+; CHECK-NEXT: ret
+ %vc = icmp sle <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_sle_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsle.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_sle_xv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsle.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_sle_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_eq_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmseq.vv v0, v16, v18
+; CHECK-NEXT: ret
+ %vc = icmp eq <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_eq_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmseq.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_eq_xv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmseq.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 5, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_iv_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_eq_iv_nxv8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 5, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_ne_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsne.vv v0, v16, v18
+; CHECK-NEXT: ret
+ %vc = icmp ne <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_ne_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsne.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_ne_xv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsne.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ne_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsne.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 5, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_ugt_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsltu.vv v0, v18, v16
+; CHECK-NEXT: ret
+ %vc = icmp ugt <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_ugt_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsgtu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_ugt_xv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsltu.vv v0, v16, v26
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ugt_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 5, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_uge_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsleu.vv v0, v18, v16
+; CHECK-NEXT: ret
+ %vc = icmp uge <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_uge_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsleu.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_uge_xv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsleu.vv v0, v16, v26
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.i v26, -16
+; CHECK-NEXT: vmsleu.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.i v26, 15
+; CHECK-NEXT: vmsleu.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 15, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_iv_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_uge_iv_nxv8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v16, 15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 15, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_2(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i16_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmset.m v0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_3(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i16_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.i v26, 1
+; CHECK-NEXT: vmsleu.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 1, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_4(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i16_4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.i v26, -15
+; CHECK-NEXT: vmsleu.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_5(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i16_5:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsleu.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_ult_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsltu.vv v0, v16, v18
+; CHECK-NEXT: ret
+ %vc = icmp ult <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_ult_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_ult_xv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsltu.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -16
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -15
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_iv_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ult_iv_nxv8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_2(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i16_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmclr.m v0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_3(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i16_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 1, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_4(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i16_4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_ule_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsleu.vv v0, v16, v18
+; CHECK-NEXT: ret
+ %vc = icmp ule <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_ule_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsleu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_ule_xv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsleu.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ule_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 5, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_sgt_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmslt.vv v0, v18, v16
+; CHECK-NEXT: ret
+ %vc = icmp sgt <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_sgt_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsgt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_sgt_xv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmslt.vv v0, v16, v26
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_sgt_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 5, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_sge_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsle.vv v0, v18, v16
+; CHECK-NEXT: ret
+ %vc = icmp sge <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_sge_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsle.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_sge_xv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsle.vv v0, v16, v26
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.i v26, -16
+; CHECK-NEXT: vmsle.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.i v26, -15
+; CHECK-NEXT: vmsle.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
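+; Note: with the splat on the left, swapping the operands leaves the immediate
+; on the right-hand side, so vmsle.vi applies directly.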
+define <vscale x 8 x i1> @icmp_sge_iv_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_sge_iv_nxv8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i16_2(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i16_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.i v26, 0
+; CHECK-NEXT: vmsle.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i16_3(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i16_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsle.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_slt_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmslt.vv v0, v16, v18
+; CHECK-NEXT: ret
+ %vc = icmp slt <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_slt_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_slt_xv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmslt.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
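+; Note: there is no vmslt{u}.vi instruction, so the immediate is first moved
+; into a scalar register and compared with vmslt{u}.vx.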
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -16
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -15
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_iv_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_slt_iv_nxv8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
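+; Note: comparing against zero can use the x0 register directly, saving the
+; addi.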
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i16_2(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i16_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, zero
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i16_3(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i16_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_sle_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsle.vv v0, v16, v18
+; CHECK-NEXT: ret
+ %vc = icmp sle <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_sle_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsle.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_sle_xv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsle.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_sle_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 5, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_eq_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmseq.vv v0, v16, v20
+; CHECK-NEXT: ret
+ %vc = icmp eq <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_eq_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmseq.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_eq_xv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmseq.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 0, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 5, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_iv_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_eq_iv_nxv8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 5, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_ne_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsne.vv v0, v16, v20
+; CHECK-NEXT: ret
+ %vc = icmp ne <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_ne_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsne.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_ne_xv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsne.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ne_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsne.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 5, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_ugt_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsltu.vv v0, v20, v16
+; CHECK-NEXT: ret
+ %vc = icmp ugt <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_ugt_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsgtu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_ugt_xv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsltu.vv v0, v16, v28
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ugt_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 5, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_uge_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsleu.vv v0, v20, v16
+; CHECK-NEXT: ret
+ %vc = icmp uge <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_uge_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsleu.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_uge_xv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsleu.vv v0, v16, v28
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
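+; Note: unsigned 'uge' against an immediate still goes through a vector splat:
+; vmv.v.i when the value fits in simm5, otherwise addi plus vmv.v.x.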
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.i v28, -16
+; CHECK-NEXT: vmsleu.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.i v28, 15
+; CHECK-NEXT: vmsleu.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 15, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_iv_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_uge_iv_nxv8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v16, 15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 15, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
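+; Note: an unsigned compare x >= 0 is always true, so it folds to vmset.m.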
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_2(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i32_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmset.m v0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 0, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_3(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i32_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.i v28, 1
+; CHECK-NEXT: vmsleu.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 1, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_4(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i32_4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.i v28, -15
+; CHECK-NEXT: vmsleu.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_5(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i32_5:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsleu.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 16, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_ult_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsltu.vv v0, v16, v20
+; CHECK-NEXT: ret
+ %vc = icmp ult <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_ult_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_ult_xv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsltu.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -16
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -15
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_iv_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ult_iv_nxv8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
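+; Note: an unsigned compare x < 0 is always false, so it folds to vmclr.m.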
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_2(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i32_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmclr.m v0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 0, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
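+; Note: an unsigned compare x < 1 is equivalent to x == 0, hence vmseq.vi.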
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_3(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i32_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 1, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_4(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i32_4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 16, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_ule_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsleu.vv v0, v16, v20
+; CHECK-NEXT: ret
+ %vc = icmp ule <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_ule_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsleu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_ule_xv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsleu.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ule_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 5, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_sgt_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmslt.vv v0, v20, v16
+; CHECK-NEXT: ret
+ %vc = icmp sgt <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_sgt_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsgt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_sgt_xv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmslt.vv v0, v16, v28
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_sgt_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 5, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_sge_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsle.vv v0, v20, v16
+; CHECK-NEXT: ret
+ %vc = icmp sge <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_sge_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsle.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_sge_xv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsle.vv v0, v16, v28
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.i v28, -16
+; CHECK-NEXT: vmsle.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.i v28, -15
+; CHECK-NEXT: vmsle.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_iv_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_sge_iv_nxv8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i32_2(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i32_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.i v28, 0
+; CHECK-NEXT: vmsle.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 0, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i32_3(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i32_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsle.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 16, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_slt_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmslt.vv v0, v16, v20
+; CHECK-NEXT: ret
+ %vc = icmp slt <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_slt_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_slt_xv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmslt.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -16
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -15
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_iv_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_slt_iv_nxv8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i32_2(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i32_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, zero
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 0, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i32_3(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i32_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 16, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_sle_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsle.vv v0, v16, v20
+; CHECK-NEXT: ret
+ %vc = icmp sle <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_sle_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsle.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_sle_xv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsle.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_sle_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 5, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
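+; Note: the second nxv8i64 operand appears to be passed indirectly rather than
+; in vector argument registers, so it is reloaded with vle64.v before the
+; compare.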
+define <vscale x 8 x i1> @icmp_eq_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_eq_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmseq.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %vc = icmp eq <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
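+; Note: in this RV32 run the i64 scalar arrives in a register pair, so the
+; splat is assembled by splatting both halves, shifting the high half into
+; place, zero-extending the low half with a shift pair, and OR-ing the two
+; together.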
+define <vscale x 8 x i1> @icmp_eq_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_eq_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmseq.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_eq_xv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmseq.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 0, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 5, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_iv_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_eq_iv_nxv8i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 5, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_ne_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmsne.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %vc = icmp ne <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_ne_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmsne.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_ne_xv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmsne.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ne_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsne.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 5, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_ugt_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmsltu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %vc = icmp ugt <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_ugt_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmsltu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_ugt_xv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmsltu.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ugt_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 5, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_uge_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmsleu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %vc = icmp uge <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_uge_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmsleu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_uge_xv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmsleu.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.i v8, -16
+; CHECK-NEXT: vmsleu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.i v8, 15
+; CHECK-NEXT: vmsleu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 15, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_iv_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_uge_iv_nxv8i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v16, 15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 15, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i64_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmset.m v0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 0, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_3(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i64_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.i v8, 1
+; CHECK-NEXT: vmsleu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 1, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_4(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i64_4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.i v8, -15
+; CHECK-NEXT: vmsleu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_5(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i64_5:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vmsleu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 16, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_ult_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmsltu.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %vc = icmp ult <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_ult_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmsltu.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_ult_xv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmsltu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -16
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -15
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_iv_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ult_iv_nxv8i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i64_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmclr.m v0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 0, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_3(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i64_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 1, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_4(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i64_4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 16, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_ule_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmsleu.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %vc = icmp ule <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_ule_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmsleu.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_ule_xv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmsleu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ule_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 5, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_sgt_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmslt.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %vc = icmp sgt <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_sgt_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmslt.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_sgt_xv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmslt.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sgt_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 5, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_sge_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmsle.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %vc = icmp sge <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_sge_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmsle.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_sge_xv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmsle.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.i v8, -16
+; CHECK-NEXT: vmsle.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.i v8, -15
+; CHECK-NEXT: vmsle.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
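+; With the splat on the LHS the swap does fire: 'splat(-15) >=s va' becomes
+; 'va <=s -15' and selects vmsle.vi directly.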
+define <vscale x 8 x i1> @icmp_sge_iv_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sge_iv_nxv8i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i64_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmsle.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 0, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i64_3(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i64_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vmsle.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 16, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_slt_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmslt.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %vc = icmp slt <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_slt_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmslt.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_slt_xv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmslt.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -16
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -15
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_iv_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_slt_iv_nxv8i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i64_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, zero
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 0, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64_3(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i64_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 16, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_sle_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmsle.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %vc = icmp sle <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_sle_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmsle.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_sle_xv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmsle.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sle_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 5, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; Check a setcc with two constant splats, which would previously get stuck in
+; an infinite loop: DAGCombine isn't clever enough to constant-fold
+; splat_vectors, but it would swap the operands back and forth indefinitely,
+; trying to canonicalize the splat onto the RHS.
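+; Were the fold implemented, this compare of splat(5) against splat(2) would
+; reduce to an all-false mask (vmclr.m), since no lane can be equal.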
+define <vscale x 8 x i1> @icmp_eq_ii_nxv8i8() {
+; CHECK-LABEL: icmp_eq_ii_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.i v25, 5
+; CHECK-NEXT: vmseq.vi v0, v25, 2
+; CHECK-NEXT: ret
+ %heada = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
+ %splata = shufflevector <vscale x 8 x i8> %heada, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %headb = insertelement <vscale x 8 x i8> undef, i8 2, i32 0
+ %splatb = shufflevector <vscale x 8 x i8> %headb, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i8> %splata, %splatb
+ ret <vscale x 8 x i1> %vc
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv64.ll
new file mode 100644
index 000000000000..180b9044a3f6
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv64.ll
@@ -0,0 +1,2981 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+; FIXME: The scalar/vector operations (the 'xv' and 'iv' tests) should swap
+; the operands and commute the condition code so that a 'vx' or 'vi'
+; instruction can be selected.
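+; For instance, 'icmp sgt splat(%b), %va' could select 'vmslt.vx v0, v16, a0'
+; directly instead of splatting %b and using the vector-vector form.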
+
+define <vscale x 8 x i1> @icmp_eq_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_eq_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmseq.vv v0, v16, v17
+; CHECK-NEXT: ret
+ %vc = icmp eq <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_eq_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmseq.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_eq_xv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmseq.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 0, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_eq_iv_nxv8i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_ne_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsne.vv v0, v16, v17
+; CHECK-NEXT: ret
+ %vc = icmp ne <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ne_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsne.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ne_xv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsne.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ne_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsne.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_ugt_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsltu.vv v0, v17, v16
+; CHECK-NEXT: ret
+ %vc = icmp ugt <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ugt_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsgtu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ugt_xv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsltu.vv v0, v16, v25
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ugt_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_uge_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsleu.vv v0, v17, v16
+; CHECK-NEXT: ret
+ %vc = icmp uge <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_uge_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsleu.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_uge_xv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsleu.vv v0, v16, v25
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.i v25, -16
+; CHECK-NEXT: vmsleu.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.i v25, 15
+; CHECK-NEXT: vmsleu.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 15, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_uge_iv_nxv8i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v16, 15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 15, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
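+; No unsigned value is less than zero, so 'uge 0' is trivially true and folds
+; to an all-true mask (vmset.m).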
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_2(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i8_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmset.m v0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 0, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_3(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i8_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.i v25, 1
+; CHECK-NEXT: vmsleu.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 1, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_4(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i8_4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.i v25, -15
+; CHECK-NEXT: vmsleu.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_5(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i8_5:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsleu.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 16, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_ult_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsltu.vv v0, v16, v17
+; CHECK-NEXT: ret
+ %vc = icmp ult <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ult_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ult_xv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsltu.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -16
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -15
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ult_iv_nxv8i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_2(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i8_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmclr.m v0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 0, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_3(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i8_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 1, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_4(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i8_4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 16, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_ule_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsleu.vv v0, v16, v17
+; CHECK-NEXT: ret
+ %vc = icmp ule <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ule_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsleu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ule_xv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsleu.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ule_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_sgt_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmslt.vv v0, v17, v16
+; CHECK-NEXT: ret
+ %vc = icmp sgt <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_sgt_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsgt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_sgt_xv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmslt.vv v0, v16, v25
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_sgt_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_sge_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsle.vv v0, v17, v16
+; CHECK-NEXT: ret
+ %vc = icmp sge <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_sge_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsle.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_sge_xv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsle.vv v0, v16, v25
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.i v25, -16
+; CHECK-NEXT: vmsle.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.i v25, -15
+; CHECK-NEXT: vmsle.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_sge_iv_nxv8i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i8_2(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i8_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.i v25, 0
+; CHECK-NEXT: vmsle.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 0, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i8_3(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i8_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsle.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 16, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_slt_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmslt.vv v0, v16, v17
+; CHECK-NEXT: ret
+ %vc = icmp slt <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_slt_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_slt_xv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmslt.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -16
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -15
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_slt_iv_nxv8i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
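+; There is no vmslt.vi encoding, but a compare against 0 can use the x0
+; (zero) register directly rather than materializing the constant.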
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i8_2(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i8_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, zero
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 0, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i8_3(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i8_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 16, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_sle_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsle.vv v0, v16, v17
+; CHECK-NEXT: ret
+ %vc = icmp sle <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_sle_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsle.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_sle_xv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsle.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_sle_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_eq_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmseq.vv v0, v16, v18
+; CHECK-NEXT: ret
+ %vc = icmp eq <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_eq_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmseq.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_eq_xv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmseq.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 5, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_iv_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_eq_iv_nxv8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 5, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_ne_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsne.vv v0, v16, v18
+; CHECK-NEXT: ret
+ %vc = icmp ne <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_ne_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsne.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_ne_xv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsne.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ne_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsne.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 5, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_ugt_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsltu.vv v0, v18, v16
+; CHECK-NEXT: ret
+ %vc = icmp ugt <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_ugt_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsgtu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_ugt_xv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsltu.vv v0, v16, v26
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ugt_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 5, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_uge_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsleu.vv v0, v18, v16
+; CHECK-NEXT: ret
+ %vc = icmp uge <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_uge_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsleu.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_uge_xv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsleu.vv v0, v16, v26
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.i v26, -16
+; CHECK-NEXT: vmsleu.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.i v26, 15
+; CHECK-NEXT: vmsleu.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 15, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_iv_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_uge_iv_nxv8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v16, 15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 15, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_2(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i16_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmset.m v0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_3(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i16_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.i v26, 1
+; CHECK-NEXT: vmsleu.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 1, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_4(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i16_4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.i v26, -15
+; CHECK-NEXT: vmsleu.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_5(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i16_5:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsleu.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_ult_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsltu.vv v0, v16, v18
+; CHECK-NEXT: ret
+ %vc = icmp ult <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_ult_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_ult_xv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsltu.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
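+; vmsltu has no .vi form, so ult against a constant splat is expected to move
+; the immediate into a scalar register and use vmsltu.vx.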
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -16
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -15
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_iv_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ult_iv_nxv8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
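+; An unsigned comparison x < 0 is trivially false, so the compare below is
+; expected to fold away to a vmclr.m of the mask register.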
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_2(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i16_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmclr.m v0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
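+; Unsigned x < 1 is equivalent to x == 0, so this is expected to select a
+; vmseq.vi against zero.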
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_3(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i16_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 1, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_4(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i16_4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_ule_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsleu.vv v0, v16, v18
+; CHECK-NEXT: ret
+ %vc = icmp ule <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_ule_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsleu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_ule_xv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsleu.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ule_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 5, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_sgt_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmslt.vv v0, v18, v16
+; CHECK-NEXT: ret
+ %vc = icmp sgt <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_sgt_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsgt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_sgt_xv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmslt.vv v0, v16, v26
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_sgt_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 5, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_sge_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsle.vv v0, v18, v16
+; CHECK-NEXT: ret
+ %vc = icmp sge <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_sge_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsle.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_sge_xv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsle.vv v0, v16, v26
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
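+; As with uge, there is no vmsge instruction, so sge against a splat is
+; expected to use vmsle.vv with the operands swapped.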
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.i v26, -16
+; CHECK-NEXT: vmsle.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.i v26, -15
+; CHECK-NEXT: vmsle.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_iv_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_sge_iv_nxv8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i16_2(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i16_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.i v26, 0
+; CHECK-NEXT: vmsle.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i16_3(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i16_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsle.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_slt_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmslt.vv v0, v16, v18
+; CHECK-NEXT: ret
+ %vc = icmp slt <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_slt_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_slt_xv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmslt.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -16
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -15
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_iv_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_slt_iv_nxv8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
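+; A signed compare against a zero splat can use the hard-wired zero register
+; directly, avoiding an immediate materialization.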
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i16_2(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i16_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, zero
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i16_3(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i16_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_sle_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsle.vv v0, v16, v18
+; CHECK-NEXT: ret
+ %vc = icmp sle <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_sle_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsle.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_sle_xv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsle.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_sle_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 5, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
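+; The same condition-code and immediate-range coverage is repeated below for
+; i32 elements, now selected at SEW=32, LMUL=4.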
+define <vscale x 8 x i1> @icmp_eq_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_eq_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmseq.vv v0, v16, v20
+; CHECK-NEXT: ret
+ %vc = icmp eq <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_eq_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmseq.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_eq_xv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmseq.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 0, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 5, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_iv_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_eq_iv_nxv8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 5, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_ne_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsne.vv v0, v16, v20
+; CHECK-NEXT: ret
+ %vc = icmp ne <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_ne_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsne.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_ne_xv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsne.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ne_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsne.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 5, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_ugt_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsltu.vv v0, v20, v16
+; CHECK-NEXT: ret
+ %vc = icmp ugt <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_ugt_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsgtu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_ugt_xv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsltu.vv v0, v16, v28
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ugt_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 5, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_uge_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsleu.vv v0, v20, v16
+; CHECK-NEXT: ret
+ %vc = icmp uge <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_uge_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsleu.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_uge_xv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsleu.vv v0, v16, v28
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.i v28, -16
+; CHECK-NEXT: vmsleu.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.i v28, 15
+; CHECK-NEXT: vmsleu.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 15, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_iv_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_uge_iv_nxv8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v16, 15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 15, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_2(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i32_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmset.m v0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 0, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_3(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i32_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.i v28, 1
+; CHECK-NEXT: vmsleu.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 1, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_4(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i32_4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.i v28, -15
+; CHECK-NEXT: vmsleu.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_5(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i32_5:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsleu.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 16, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_ult_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsltu.vv v0, v16, v20
+; CHECK-NEXT: ret
+ %vc = icmp ult <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_ult_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_ult_xv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsltu.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -16
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -15
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_iv_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ult_iv_nxv8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_2(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i32_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmclr.m v0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 0, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_3(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i32_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 1, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_4(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i32_4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 16, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_ule_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsleu.vv v0, v16, v20
+; CHECK-NEXT: ret
+ %vc = icmp ule <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_ule_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsleu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_ule_xv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsleu.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ule_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 5, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_sgt_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmslt.vv v0, v20, v16
+; CHECK-NEXT: ret
+ %vc = icmp sgt <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_sgt_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsgt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_sgt_xv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmslt.vv v0, v16, v28
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_sgt_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 5, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_sge_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsle.vv v0, v20, v16
+; CHECK-NEXT: ret
+ %vc = icmp sge <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_sge_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsle.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_sge_xv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsle.vv v0, v16, v28
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.i v28, -16
+; CHECK-NEXT: vmsle.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.i v28, -15
+; CHECK-NEXT: vmsle.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_iv_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_sge_iv_nxv8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i32_2(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i32_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.i v28, 0
+; CHECK-NEXT: vmsle.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 0, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i32_3(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i32_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsle.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 16, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_slt_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmslt.vv v0, v16, v20
+; CHECK-NEXT: ret
+ %vc = icmp slt <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_slt_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_slt_xv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmslt.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -16
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -15
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_iv_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_slt_iv_nxv8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i32_2(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i32_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, zero
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 0, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i32_3(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i32_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 16, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_sle_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsle.vv v0, v16, v20
+; CHECK-NEXT: ret
+ %vc = icmp sle <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_sle_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsle.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_sle_xv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsle.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_sle_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 5, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
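+; For the i64 tests the second LMUL=8 vector argument no longer fits in the
+; vector argument registers, so the .vv cases presumably receive it
+; indirectly and reload it with vle64.v before comparing.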
+define <vscale x 8 x i1> @icmp_eq_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_eq_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmseq.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %vc = icmp eq <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_eq_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmseq.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_eq_xv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vmseq.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 0, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 5, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_iv_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_eq_iv_nxv8i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 5, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_ne_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmsne.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %vc = icmp ne <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_ne_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsne.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_ne_xv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vmsne.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ne_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsne.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 5, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_ugt_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmsltu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %vc = icmp ugt <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_ugt_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsgtu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_ugt_xv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vmsltu.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ugt_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 5, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_uge_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmsleu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %vc = icmp uge <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_uge_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vmsleu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_uge_xv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vmsleu.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.i v8, -16
+; CHECK-NEXT: vmsleu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.i v8, 15
+; CHECK-NEXT: vmsleu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 15, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_iv_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_uge_iv_nxv8i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v16, 15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 15, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i64_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmset.m v0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 0, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_3(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i64_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.i v8, 1
+; CHECK-NEXT: vmsleu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 1, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_4(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i64_4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.i v8, -15
+; CHECK-NEXT: vmsleu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_5(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i64_5:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vmsleu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 16, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_ult_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmsltu.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %vc = icmp ult <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_ult_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_ult_xv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vmsltu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -16
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -15
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_iv_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ult_iv_nxv8i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i64_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmclr.m v0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 0, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_3(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i64_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 1, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_4(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i64_4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 16, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_ule_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmsleu.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %vc = icmp ule <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_ule_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsleu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_ule_xv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vmsleu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ule_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 5, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_sgt_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmslt.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %vc = icmp sgt <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_sgt_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsgt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_sgt_xv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vmslt.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sgt_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 5, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_sge_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmsle.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %vc = icmp sge <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_sge_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vmsle.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_sge_xv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vmsle.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.i v8, -16
+; CHECK-NEXT: vmsle.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.i v8, -15
+; CHECK-NEXT: vmsle.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_iv_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sge_iv_nxv8i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i64_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmsle.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 0, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i64_3(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i64_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vmsle.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 16, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_slt_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmslt.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %vc = icmp slt <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_slt_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_slt_xv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vmslt.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -16
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -15
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_iv_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_slt_iv_nxv8i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i64_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, zero
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 0, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64_3(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i64_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 16, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_sle_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmsle.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %vc = icmp sle <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_sle_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsle.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_sle_xv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vmsle.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sle_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 5, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+